LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/display/amdgpu_dm - amdgpu_dm.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36

                 Hit    Total    Coverage
Lines:             0     3980       0.0 %
Functions:         0      170       0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2015 Advanced Micro Devices, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  * Authors: AMD
      23             :  *
      24             :  */
      25             : 
      26             : /* The caprices of the preprocessor require that this be declared right here */
      27             : #define CREATE_TRACE_POINTS
      28             : 
      29             : #include "dm_services_types.h"
      30             : #include "dc.h"
      31             : #include "dc_link_dp.h"
      32             : #include "link_enc_cfg.h"
      33             : #include "dc/inc/core_types.h"
      34             : #include "dal_asic_id.h"
      35             : #include "dmub/dmub_srv.h"
      36             : #include "dc/inc/hw/dmcu.h"
      37             : #include "dc/inc/hw/abm.h"
      38             : #include "dc/dc_dmub_srv.h"
      39             : #include "dc/dc_edid_parser.h"
      40             : #include "dc/dc_stat.h"
      41             : #include "amdgpu_dm_trace.h"
      42             : 
      43             : #include "vid.h"
      44             : #include "amdgpu.h"
      45             : #include "amdgpu_display.h"
      46             : #include "amdgpu_ucode.h"
      47             : #include "atom.h"
      48             : #include "amdgpu_dm.h"
      49             : #include "amdgpu_dm_plane.h"
      50             : #include "amdgpu_dm_crtc.h"
      51             : #ifdef CONFIG_DRM_AMD_DC_HDCP
      52             : #include "amdgpu_dm_hdcp.h"
      53             : #include <drm/display/drm_hdcp_helper.h>
      54             : #endif
      55             : #include "amdgpu_pm.h"
      56             : #include "amdgpu_atombios.h"
      57             : 
      58             : #include "amd_shared.h"
      59             : #include "amdgpu_dm_irq.h"
      60             : #include "dm_helpers.h"
      61             : #include "amdgpu_dm_mst_types.h"
      62             : #if defined(CONFIG_DEBUG_FS)
      63             : #include "amdgpu_dm_debugfs.h"
      64             : #endif
      65             : #include "amdgpu_dm_psr.h"
      66             : 
      67             : #include "ivsrcid/ivsrcid_vislands30.h"
      68             : 
      69             : #include "i2caux_interface.h"
      70             : #include <linux/module.h>
      71             : #include <linux/moduleparam.h>
      72             : #include <linux/types.h>
      73             : #include <linux/pm_runtime.h>
      74             : #include <linux/pci.h>
      75             : #include <linux/firmware.h>
      76             : #include <linux/component.h>
      77             : #include <linux/dmi.h>
      78             : 
      79             : #include <drm/display/drm_dp_mst_helper.h>
      80             : #include <drm/display/drm_hdmi_helper.h>
      81             : #include <drm/drm_atomic.h>
      82             : #include <drm/drm_atomic_uapi.h>
      83             : #include <drm/drm_atomic_helper.h>
      84             : #include <drm/drm_fb_helper.h>
      85             : #include <drm/drm_fourcc.h>
      86             : #include <drm/drm_edid.h>
      87             : #include <drm/drm_vblank.h>
      88             : #include <drm/drm_audio_component.h>
      89             : 
      90             : #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
      91             : 
      92             : #include "dcn/dcn_1_0_offset.h"
      93             : #include "dcn/dcn_1_0_sh_mask.h"
      94             : #include "soc15_hw_ip.h"
      95             : #include "soc15_common.h"
      96             : #include "vega10_ip_offset.h"
      97             : 
      98             : #include "soc15_common.h"
      99             : 
     100             : #include "gc/gc_11_0_0_offset.h"
     101             : #include "gc/gc_11_0_0_sh_mask.h"
     102             : 
     103             : #include "modules/inc/mod_freesync.h"
     104             : #include "modules/power/power_helpers.h"
     105             : #include "modules/inc/mod_info_packet.h"
     106             : 
     107             : #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
     108             : MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
     109             : #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
     110             : MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
     111             : #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
     112             : MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
     113             : #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
     114             : MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
     115             : #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
     116             : MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
     117             : #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
     118             : MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
     119             : #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
     120             : MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
     121             : #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
     122             : MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
     123             : #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
     124             : MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
     125             : #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
     126             : MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
     127             : #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
     128             : MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
     129             : 
     130             : #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
     131             : MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
     132             : #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
     133             : MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
     134             : 
     135             : #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
     136             : MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
     137             : 
     138             : #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
     139             : MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
     140             : 
     141             : /* Number of bytes in PSP header for firmware. */
     142             : #define PSP_HEADER_BYTES 0x100
     143             : 
     144             : /* Number of bytes in PSP footer for firmware. */
     145             : #define PSP_FOOTER_BYTES 0x100
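
/*
 * Editor's illustrative sketch (not upstream code; the helper name is
 * hypothetical): the DMUB firmware blob is wrapped by the PSP header and
 * footer sized above, so the instruction payload actually handed to the
 * DMUB is roughly the blob size minus both. The real driver performs this
 * arithmetic during DMUB software init, outside this excerpt.
 */
static inline uint32_t example_dmub_fw_payload_size(uint32_t fw_blob_size)
{
        if (fw_blob_size < PSP_HEADER_BYTES + PSP_FOOTER_BYTES)
                return 0;       /* blob too small to hold any payload */

        return fw_blob_size - PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
}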
     146             : 
     147             : /**
     148             :  * DOC: overview
     149             :  *
     150             :  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
     151             :  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
     152             :  * requests into DC requests, and DC responses into DRM responses.
     153             :  *
     154             :  * The root control structure is &struct amdgpu_display_manager.
     155             :  */
     156             : 
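/*
 * Editor's illustrative sketch (hypothetical helper, not upstream code):
 * the root control structure mentioned above is embedded in the device as
 * adev->dm, and the DC core handle hangs off it as adev->dm.dc. Most code
 * in this file reaches the DC layer that way, e.g.:
 */
static inline struct dc *example_get_dc_from_adev(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;  /* DM root structure */

        return dm->dc;  /* DC core instance driven by the DM */
}
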
     157             : /* basic init/fini API */
     158             : static int amdgpu_dm_init(struct amdgpu_device *adev);
     159             : static void amdgpu_dm_fini(struct amdgpu_device *adev);
     160             : static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
     161             : 
     162             : static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
     163             : {
     164             :         switch (link->dpcd_caps.dongle_type) {
     165             :         case DISPLAY_DONGLE_NONE:
     166             :                 return DRM_MODE_SUBCONNECTOR_Native;
     167             :         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
     168             :                 return DRM_MODE_SUBCONNECTOR_VGA;
     169             :         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
     170             :         case DISPLAY_DONGLE_DP_DVI_DONGLE:
     171             :                 return DRM_MODE_SUBCONNECTOR_DVID;
     172             :         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
     173             :         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
     174             :                 return DRM_MODE_SUBCONNECTOR_HDMIA;
     175             :         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
     176             :         default:
     177             :                 return DRM_MODE_SUBCONNECTOR_Unknown;
     178             :         }
     179             : }
     180             : 
     181           0 : static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
     182             : {
     183           0 :         struct dc_link *link = aconnector->dc_link;
     184           0 :         struct drm_connector *connector = &aconnector->base;
     185           0 :         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
     186             : 
     187           0 :         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
     188             :                 return;
     189             : 
     190           0 :         if (aconnector->dc_sink)
     191           0 :                 subconnector = get_subconnector_type(link);
     192             : 
     193           0 :         drm_object_property_set_value(&connector->base,
     194           0 :                         connector->dev->mode_config.dp_subconnector_property,
     195             :                         subconnector);
     196             : }
     197             : 
     198             : /*
     199             :  * initializes drm_device display related structures, based on the information
      200             :  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
     201             :  * drm_encoder, drm_mode_config
     202             :  *
     203             :  * Returns 0 on success
     204             :  */
     205             : static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
     206             : /* removes and deallocates the drm structures, created by the above function */
     207             : static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
     208             : 
     209             : static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
     210             :                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
     211             :                                     uint32_t link_index,
     212             :                                     struct amdgpu_encoder *amdgpu_encoder);
     213             : static int amdgpu_dm_encoder_init(struct drm_device *dev,
     214             :                                   struct amdgpu_encoder *aencoder,
     215             :                                   uint32_t link_index);
     216             : 
     217             : static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
     218             : 
     219             : static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
     220             : 
     221             : static int amdgpu_dm_atomic_check(struct drm_device *dev,
     222             :                                   struct drm_atomic_state *state);
     223             : 
     224             : static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
     225             : static void handle_hpd_rx_irq(void *param);
     226             : 
     227             : static bool
     228             : is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
     229             :                                  struct drm_crtc_state *new_crtc_state);
     230             : /*
     231             :  * dm_vblank_get_counter
     232             :  *
     233             :  * @brief
     234             :  * Get counter for number of vertical blanks
     235             :  *
     236             :  * @param
     237             :  * struct amdgpu_device *adev - [in] desired amdgpu device
     238             :  * int disp_idx - [in] which CRTC to get the counter from
     239             :  *
     240             :  * @return
     241             :  * Counter for vertical blanks
     242             :  */
     243           0 : static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
     244             : {
     245           0 :         if (crtc >= adev->mode_info.num_crtc)
     246             :                 return 0;
     247             :         else {
     248           0 :                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
     249             : 
     250           0 :                 if (acrtc->dm_irq_params.stream == NULL) {
     251           0 :                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
     252             :                                   crtc);
     253           0 :                         return 0;
     254             :                 }
     255             : 
     256           0 :                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
     257             :         }
     258             : }
     259             : 
     260           0 : static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
     261             :                                   u32 *vbl, u32 *position)
     262             : {
     263             :         uint32_t v_blank_start, v_blank_end, h_position, v_position;
     264             : 
     265           0 :         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
     266             :                 return -EINVAL;
     267             :         else {
     268           0 :                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
     269             : 
      270           0 :                 if (acrtc->dm_irq_params.stream == NULL) {
     271           0 :                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
     272             :                                   crtc);
     273           0 :                         return 0;
     274             :                 }
     275             : 
     276             :                 /*
     277             :                  * TODO rework base driver to use values directly.
     278             :                  * for now parse it back into reg-format
     279             :                  */
     280           0 :                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
     281             :                                          &v_blank_start,
     282             :                                          &v_blank_end,
     283             :                                          &h_position,
     284             :                                          &v_position);
     285             : 
     286           0 :                 *position = v_position | (h_position << 16);
     287           0 :                 *vbl = v_blank_start | (v_blank_end << 16);
     288             :         }
     289             : 
     290           0 :         return 0;
     291             : }
     292             : 
     293           0 : static bool dm_is_idle(void *handle)
     294             : {
     295             :         /* XXX todo */
     296           0 :         return true;
     297             : }
     298             : 
     299           0 : static int dm_wait_for_idle(void *handle)
     300             : {
     301             :         /* XXX todo */
     302           0 :         return 0;
     303             : }
     304             : 
     305           0 : static bool dm_check_soft_reset(void *handle)
     306             : {
     307           0 :         return false;
     308             : }
     309             : 
     310           0 : static int dm_soft_reset(void *handle)
     311             : {
     312             :         /* XXX todo */
     313           0 :         return 0;
     314             : }
     315             : 
     316             : static struct amdgpu_crtc *
     317           0 : get_crtc_by_otg_inst(struct amdgpu_device *adev,
     318             :                      int otg_inst)
     319             : {
     320           0 :         struct drm_device *dev = adev_to_drm(adev);
     321             :         struct drm_crtc *crtc;
     322             :         struct amdgpu_crtc *amdgpu_crtc;
     323             : 
     324           0 :         if (WARN_ON(otg_inst == -1))
     325           0 :                 return adev->mode_info.crtcs[0];
     326             : 
     327           0 :         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
     328           0 :                 amdgpu_crtc = to_amdgpu_crtc(crtc);
     329             : 
     330           0 :                 if (amdgpu_crtc->otg_inst == otg_inst)
     331             :                         return amdgpu_crtc;
     332             :         }
     333             : 
     334             :         return NULL;
     335             : }
     336             : 
     337           0 : static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
     338             :                                               struct dm_crtc_state *new_state)
     339             : {
      340           0 :         if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
     341             :                 return true;
     342           0 :         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
     343             :                 return true;
     344             :         else
     345           0 :                 return false;
     346             : }
     347             : 
     348             : /**
     349             :  * dm_pflip_high_irq() - Handle pageflip interrupt
     350             :  * @interrupt_params: ignored
     351             :  *
     352             :  * Handles the pageflip interrupt by notifying all interested parties
     353             :  * that the pageflip has been completed.
     354             :  */
     355           0 : static void dm_pflip_high_irq(void *interrupt_params)
     356             : {
     357             :         struct amdgpu_crtc *amdgpu_crtc;
     358           0 :         struct common_irq_params *irq_params = interrupt_params;
     359           0 :         struct amdgpu_device *adev = irq_params->adev;
     360             :         unsigned long flags;
     361             :         struct drm_pending_vblank_event *e;
     362             :         uint32_t vpos, hpos, v_blank_start, v_blank_end;
     363             :         bool vrr_active;
     364             : 
     365           0 :         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
     366             : 
     367             :         /* IRQ could occur when in initial stage */
     368             :         /* TODO work and BO cleanup */
     369           0 :         if (amdgpu_crtc == NULL) {
     370             :                 DC_LOG_PFLIP("CRTC is null, returning.\n");
     371           0 :                 return;
     372             :         }
     373             : 
     374           0 :         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
     375             : 
      376           0 :         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
     377           0 :                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
     378             :                                                  amdgpu_crtc->pflip_status,
     379             :                                                  AMDGPU_FLIP_SUBMITTED,
     380             :                                                  amdgpu_crtc->crtc_id,
     381             :                                                  amdgpu_crtc);
     382           0 :                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
     383             :                 return;
     384             :         }
     385             : 
     386             :         /* page flip completed. */
     387           0 :         e = amdgpu_crtc->event;
     388           0 :         amdgpu_crtc->event = NULL;
     389             : 
     390           0 :         WARN_ON(!e);
     391             : 
     392           0 :         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
     393             : 
     394             :         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
     395           0 :         if (!vrr_active ||
     396           0 :             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
     397           0 :                                       &v_blank_end, &hpos, &vpos) ||
     398           0 :             (vpos < v_blank_start)) {
     399             :                 /* Update to correct count and vblank timestamp if racing with
     400             :                  * vblank irq. This also updates to the correct vblank timestamp
     401             :                  * even in VRR mode, as scanout is past the front-porch atm.
     402             :                  */
     403           0 :                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
     404             : 
     405             :                 /* Wake up userspace by sending the pageflip event with proper
     406             :                  * count and timestamp of vblank of flip completion.
     407             :                  */
     408           0 :                 if (e) {
     409           0 :                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
     410             : 
     411             :                         /* Event sent, so done with vblank for this flip */
     412           0 :                         drm_crtc_vblank_put(&amdgpu_crtc->base);
     413             :                 }
     414           0 :         } else if (e) {
     415             :                 /* VRR active and inside front-porch: vblank count and
     416             :                  * timestamp for pageflip event will only be up to date after
     417             :                  * drm_crtc_handle_vblank() has been executed from late vblank
     418             :                  * irq handler after start of back-porch (vline 0). We queue the
     419             :                  * pageflip event for send-out by drm_crtc_handle_vblank() with
     420             :                  * updated timestamp and count, once it runs after us.
     421             :                  *
     422             :                  * We need to open-code this instead of using the helper
     423             :                  * drm_crtc_arm_vblank_event(), as that helper would
     424             :                  * call drm_crtc_accurate_vblank_count(), which we must
     425             :                  * not call in VRR mode while we are in front-porch!
     426             :                  */
     427             : 
     428             :                 /* sequence will be replaced by real count during send-out. */
     429           0 :                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
     430           0 :                 e->pipe = amdgpu_crtc->crtc_id;
     431             : 
     432           0 :                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
     433           0 :                 e = NULL;
     434             :         }
     435             : 
     436             :         /* Keep track of vblank of this flip for flip throttling. We use the
     437             :          * cooked hw counter, as that one incremented at start of this vblank
     438             :          * of pageflip completion, so last_flip_vblank is the forbidden count
     439             :          * for queueing new pageflips if vsync + VRR is enabled.
     440             :          */
     441           0 :         amdgpu_crtc->dm_irq_params.last_flip_vblank =
     442           0 :                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
     443             : 
     444           0 :         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
     445           0 :         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
     446             : 
     447             :         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
     448             :                      amdgpu_crtc->crtc_id, amdgpu_crtc,
     449             :                      vrr_active, (int) !e);
     450             : }
     451             : 
     452           0 : static void dm_vupdate_high_irq(void *interrupt_params)
     453             : {
     454           0 :         struct common_irq_params *irq_params = interrupt_params;
     455           0 :         struct amdgpu_device *adev = irq_params->adev;
     456             :         struct amdgpu_crtc *acrtc;
     457             :         struct drm_device *drm_dev;
     458             :         struct drm_vblank_crtc *vblank;
     459             :         ktime_t frame_duration_ns, previous_timestamp;
     460             :         unsigned long flags;
     461             :         int vrr_active;
     462             : 
     463           0 :         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
     464             : 
     465           0 :         if (acrtc) {
     466           0 :                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
     467           0 :                 drm_dev = acrtc->base.dev;
     468           0 :                 vblank = &drm_dev->vblank[acrtc->base.index];
     469           0 :                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
     470           0 :                 frame_duration_ns = vblank->time - previous_timestamp;
     471             : 
     472           0 :                 if (frame_duration_ns > 0) {
     473           0 :                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
     474             :                                                 frame_duration_ns,
     475           0 :                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
     476           0 :                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
     477             :                 }
     478             : 
     479             :                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
     480             :                               acrtc->crtc_id,
     481             :                               vrr_active);
     482             : 
     483             :                 /* Core vblank handling is done here after end of front-porch in
     484             :                  * vrr mode, as vblank timestamping will give valid results
     485             :                  * while now done after front-porch. This will also deliver
     486             :                  * page-flip completion events that have been queued to us
     487             :                  * if a pageflip happened inside front-porch.
     488             :                  */
     489           0 :                 if (vrr_active) {
     490           0 :                         dm_crtc_handle_vblank(acrtc);
     491             : 
     492             :                         /* BTR processing for pre-DCE12 ASICs */
     493           0 :                         if (acrtc->dm_irq_params.stream &&
     494           0 :                             adev->family < AMDGPU_FAMILY_AI) {
     495           0 :                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
     496           0 :                                 mod_freesync_handle_v_update(
     497             :                                     adev->dm.freesync_module,
     498           0 :                                     acrtc->dm_irq_params.stream,
     499             :                                     &acrtc->dm_irq_params.vrr_params);
     500             : 
     501           0 :                                 dc_stream_adjust_vmin_vmax(
     502             :                                     adev->dm.dc,
     503             :                                     acrtc->dm_irq_params.stream,
     504             :                                     &acrtc->dm_irq_params.vrr_params.adjust);
     505           0 :                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
     506             :                         }
     507             :                 }
     508             :         }
     509           0 : }
     510             : 
     511             : /**
     512             :  * dm_crtc_high_irq() - Handles CRTC interrupt
     513             :  * @interrupt_params: used for determining the CRTC instance
     514             :  *
      515             :  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
     516             :  * event handler.
     517             :  */
     518           0 : static void dm_crtc_high_irq(void *interrupt_params)
     519             : {
     520           0 :         struct common_irq_params *irq_params = interrupt_params;
     521           0 :         struct amdgpu_device *adev = irq_params->adev;
     522             :         struct amdgpu_crtc *acrtc;
     523             :         unsigned long flags;
     524             :         int vrr_active;
     525             : 
     526           0 :         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
     527           0 :         if (!acrtc)
     528             :                 return;
     529             : 
     530           0 :         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
     531             : 
     532             :         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
     533             :                       vrr_active, acrtc->dm_irq_params.active_planes);
     534             : 
     535             :         /**
     536             :          * Core vblank handling at start of front-porch is only possible
     537             :          * in non-vrr mode, as only there vblank timestamping will give
     538             :          * valid results while done in front-porch. Otherwise defer it
     539             :          * to dm_vupdate_high_irq after end of front-porch.
     540             :          */
     541           0 :         if (!vrr_active)
     542           0 :                 dm_crtc_handle_vblank(acrtc);
     543             : 
     544             :         /**
     545             :          * Following stuff must happen at start of vblank, for crc
     546             :          * computation and below-the-range btr support in vrr mode.
     547             :          */
     548             :         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
     549             : 
     550             :         /* BTR updates need to happen before VUPDATE on Vega and above. */
     551           0 :         if (adev->family < AMDGPU_FAMILY_AI)
     552             :                 return;
     553             : 
     554           0 :         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
     555             : 
     556           0 :         if (acrtc->dm_irq_params.stream &&
     557           0 :             acrtc->dm_irq_params.vrr_params.supported &&
     558           0 :             acrtc->dm_irq_params.freesync_config.state ==
     559             :                     VRR_STATE_ACTIVE_VARIABLE) {
     560           0 :                 mod_freesync_handle_v_update(adev->dm.freesync_module,
     561             :                                              acrtc->dm_irq_params.stream,
     562             :                                              &acrtc->dm_irq_params.vrr_params);
     563             : 
     564           0 :                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
     565             :                                            &acrtc->dm_irq_params.vrr_params.adjust);
     566             :         }
     567             : 
     568             :         /*
     569             :          * If there aren't any active_planes then DCH HUBP may be clock-gated.
     570             :          * In that case, pageflip completion interrupts won't fire and pageflip
     571             :          * completion events won't get delivered. Prevent this by sending
     572             :          * pending pageflip events from here if a flip is still pending.
     573             :          *
     574             :          * If any planes are enabled, use dm_pflip_high_irq() instead, to
     575             :          * avoid race conditions between flip programming and completion,
     576             :          * which could cause too early flip completion events.
     577             :          */
     578           0 :         if (adev->family >= AMDGPU_FAMILY_RV &&
     579           0 :             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
     580           0 :             acrtc->dm_irq_params.active_planes == 0) {
     581           0 :                 if (acrtc->event) {
     582           0 :                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
     583           0 :                         acrtc->event = NULL;
     584           0 :                         drm_crtc_vblank_put(&acrtc->base);
     585             :                 }
     586           0 :                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
     587             :         }
     588             : 
     589           0 :         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
     590             : }
     591             : 
     592             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
     593             : /**
     594             :  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
     595             :  * DCN generation ASICs
     596             :  * @interrupt_params: interrupt parameters
     597             :  *
     598             :  * Used to set crc window/read out crc value at vertical line 0 position
     599             :  */
     600             : static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
     601             : {
     602             :         struct common_irq_params *irq_params = interrupt_params;
     603             :         struct amdgpu_device *adev = irq_params->adev;
     604             :         struct amdgpu_crtc *acrtc;
     605             : 
     606             :         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
     607             : 
     608             :         if (!acrtc)
     609             :                 return;
     610             : 
     611             :         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
     612             : }
     613             : #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
     614             : 
     615             : /**
     616             :  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
     617             :  * @adev: amdgpu_device pointer
     618             :  * @notify: dmub notification structure
     619             :  *
     620             :  * Dmub AUX or SET_CONFIG command completion processing callback
      621             :  * Copies dmub notification to DM which is to be read by the AUX command
      622             :  * issuing thread and also signals the event to wake up the thread.
     623             :  */
     624           0 : static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
     625             :                                         struct dmub_notification *notify)
     626             : {
     627           0 :         if (adev->dm.dmub_notify)
     628           0 :                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
     629           0 :         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
     630           0 :                 complete(&adev->dm.dmub_aux_transfer_done);
     631           0 : }
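
/*
 * Editor's usage sketch (hedged; helper name is hypothetical): the "AUX
 * command issuing thread" referred to above is expected to block on the
 * completion signalled by dmub_aux_setconfig_callback() and then read the
 * reply copied into adev->dm.dmub_notify. The real waiter lives outside
 * this excerpt; this only illustrates the hand-off.
 */
static int example_wait_for_dmub_aux_reply(struct amdgpu_device *adev,
                                           unsigned long timeout_jiffies)
{
        if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
                                         timeout_jiffies))
                return -ETIMEDOUT;      /* DMUB never signalled a reply */

        /* reply payload is now available in adev->dm.dmub_notify */
        return 0;
}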
     632             : 
     633             : /**
     634             :  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
     635             :  * @adev: amdgpu_device pointer
     636             :  * @notify: dmub notification structure
     637             :  *
      638             :  * Dmub Hpd interrupt processing callback. Gets the display index through the
      639             :  * link index and calls the helper to do the processing.
     640             :  */
     641           0 : static void dmub_hpd_callback(struct amdgpu_device *adev,
     642             :                               struct dmub_notification *notify)
     643             : {
     644             :         struct amdgpu_dm_connector *aconnector;
     645           0 :         struct amdgpu_dm_connector *hpd_aconnector = NULL;
     646             :         struct drm_connector *connector;
     647             :         struct drm_connector_list_iter iter;
     648             :         struct dc_link *link;
     649           0 :         uint8_t link_index = 0;
     650             :         struct drm_device *dev;
     651             : 
     652           0 :         if (adev == NULL)
     653           0 :                 return;
     654             : 
     655           0 :         if (notify == NULL) {
     656           0 :                 DRM_ERROR("DMUB HPD callback notification was NULL");
     657           0 :                 return;
     658             :         }
     659             : 
     660           0 :         if (notify->link_index > adev->dm.dc->link_count) {
      661           0 :                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
     662           0 :                 return;
     663             :         }
     664             : 
     665           0 :         link_index = notify->link_index;
     666           0 :         link = adev->dm.dc->links[link_index];
     667           0 :         dev = adev->dm.ddev;
     668             : 
     669           0 :         drm_connector_list_iter_begin(dev, &iter);
     670           0 :         drm_for_each_connector_iter(connector, &iter) {
     671           0 :                 aconnector = to_amdgpu_dm_connector(connector);
     672           0 :                 if (link && aconnector->dc_link == link) {
     673           0 :                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
     674           0 :                         hpd_aconnector = aconnector;
     675           0 :                         break;
     676             :                 }
     677             :         }
     678           0 :         drm_connector_list_iter_end(&iter);
     679             : 
     680           0 :         if (hpd_aconnector) {
     681           0 :                 if (notify->type == DMUB_NOTIFICATION_HPD)
     682           0 :                         handle_hpd_irq_helper(hpd_aconnector);
     683           0 :                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
     684           0 :                         handle_hpd_rx_irq(hpd_aconnector);
     685             :         }
     686             : }
     687             : 
     688             : /**
     689             :  * register_dmub_notify_callback - Sets callback for DMUB notify
     690             :  * @adev: amdgpu_device pointer
     691             :  * @type: Type of dmub notification
     692             :  * @callback: Dmub interrupt callback function
     693             :  * @dmub_int_thread_offload: offload indicator
     694             :  *
     695             :  * API to register a dmub callback handler for a dmub notification
      696             :  * Also sets an indicator for whether callback processing is to be offloaded
      697             :  * to the dmub interrupt handling thread.
      698             :  * Return: true if successfully registered, false if the callback is NULL or the type is invalid
     699             :  */
     700             : static bool register_dmub_notify_callback(struct amdgpu_device *adev,
     701             :                                           enum dmub_notification_type type,
     702             :                                           dmub_notify_interrupt_callback_t callback,
     703             :                                           bool dmub_int_thread_offload)
     704             : {
     705             :         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
     706           0 :                 adev->dm.dmub_callback[type] = callback;
     707           0 :                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
     708             :         } else
     709             :                 return false;
     710             : 
     711             :         return true;
     712             : }
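
/*
 * Editor's usage sketch (hedged; wrapper name is hypothetical): during DM
 * init the driver is expected to wire up its DMUB handlers through the
 * helper above, roughly as below. The exact call sites are outside this
 * excerpt; treat this as an illustration of the API, not a copy of the
 * init path.
 */
static void example_register_dmub_callbacks(struct amdgpu_device *adev)
{
        /* AUX/SET_CONFIG replies are handled inline (no worker offload) */
        if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
                                           dmub_aux_setconfig_callback, false))
                DRM_ERROR("failed to register DMUB AUX callback");

        /* HPD notifications are offloaded to the delayed HPD workqueue */
        if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
                                           dmub_hpd_callback, true))
                DRM_ERROR("failed to register DMUB HPD callback");

        if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
                                           dmub_hpd_callback, true))
                DRM_ERROR("failed to register DMUB HPD_IRQ callback");
}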
     713             : 
     714           0 : static void dm_handle_hpd_work(struct work_struct *work)
     715             : {
     716             :         struct dmub_hpd_work *dmub_hpd_wrk;
     717             : 
     718           0 :         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
     719             : 
     720           0 :         if (!dmub_hpd_wrk->dmub_notify) {
     721           0 :                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
     722           0 :                 return;
     723             :         }
     724             : 
     725           0 :         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
     726           0 :                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
     727             :                 dmub_hpd_wrk->dmub_notify);
     728             :         }
     729             : 
     730           0 :         kfree(dmub_hpd_wrk->dmub_notify);
     731           0 :         kfree(dmub_hpd_wrk);
     732             : 
     733             : }
     734             : 
     735             : #define DMUB_TRACE_MAX_READ 64
     736             : /**
     737             :  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
     738             :  * @interrupt_params: used for determining the Outbox instance
     739             :  *
     740             :  * Handles the Outbox Interrupt
     741             :  * event handler.
     742             :  */
     743           0 : static void dm_dmub_outbox1_low_irq(void *interrupt_params)
     744             : {
     745             :         struct dmub_notification notify;
     746           0 :         struct common_irq_params *irq_params = interrupt_params;
     747           0 :         struct amdgpu_device *adev = irq_params->adev;
     748           0 :         struct amdgpu_display_manager *dm = &adev->dm;
     749           0 :         struct dmcub_trace_buf_entry entry = { 0 };
     750           0 :         uint32_t count = 0;
     751             :         struct dmub_hpd_work *dmub_hpd_wrk;
     752           0 :         struct dc_link *plink = NULL;
     753             : 
     754           0 :         if (dc_enable_dmub_notifications(adev->dm.dc) &&
     755           0 :                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
     756             : 
     757             :                 do {
     758           0 :                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
     759           0 :                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
     760           0 :                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
     761           0 :                                 continue;
     762             :                         }
     763           0 :                         if (!dm->dmub_callback[notify.type]) {
     764           0 :                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
     765           0 :                                 continue;
     766             :                         }
     767           0 :                         if (dm->dmub_thread_offload[notify.type] == true) {
     768           0 :                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
     769           0 :                                 if (!dmub_hpd_wrk) {
     770           0 :                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
     771           0 :                                         return;
     772             :                                 }
     773           0 :                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
     774           0 :                                 if (!dmub_hpd_wrk->dmub_notify) {
     775           0 :                                         kfree(dmub_hpd_wrk);
     776           0 :                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
     777           0 :                                         return;
     778             :                                 }
     779           0 :                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
     780             :                                 if (dmub_hpd_wrk->dmub_notify)
     781           0 :                                         memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
     782           0 :                                 dmub_hpd_wrk->adev = adev;
     783           0 :                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
     784           0 :                                         plink = adev->dm.dc->links[notify.link_index];
     785           0 :                                         if (plink) {
     786           0 :                                                 plink->hpd_status =
     787           0 :                                                         notify.hpd_status == DP_HPD_PLUG;
     788             :                                         }
     789             :                                 }
     790           0 :                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
     791             :                         } else {
     792           0 :                                 dm->dmub_callback[notify.type](adev, &notify);
     793             :                         }
     794           0 :                 } while (notify.pending_notification);
     795             :         }
     796             : 
     797             : 
     798             :         do {
     799           0 :                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
     800           0 :                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
     801             :                                                         entry.param0, entry.param1);
     802             : 
     803           0 :                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
     804             :                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
     805             :                 } else
     806             :                         break;
     807             : 
     808           0 :                 count++;
     809             : 
     810           0 :         } while (count <= DMUB_TRACE_MAX_READ);
     811             : 
     812           0 :         if (count > DMUB_TRACE_MAX_READ)
     813           0 :                 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
     814             : }
     815             : 
     816           0 : static int dm_set_clockgating_state(void *handle,
     817             :                   enum amd_clockgating_state state)
     818             : {
     819           0 :         return 0;
     820             : }
     821             : 
     822           0 : static int dm_set_powergating_state(void *handle,
     823             :                   enum amd_powergating_state state)
     824             : {
     825           0 :         return 0;
     826             : }
     827             : 
     828             : /* Prototypes of private functions */
     829             : static int dm_early_init(void* handle);
     830             : 
     831             : /* Allocate memory for FBC compressed data  */
     832           0 : static void amdgpu_dm_fbc_init(struct drm_connector *connector)
     833             : {
     834           0 :         struct drm_device *dev = connector->dev;
     835           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
     836           0 :         struct dm_compressor_info *compressor = &adev->dm.compressor;
     837           0 :         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
     838             :         struct drm_display_mode *mode;
     839           0 :         unsigned long max_size = 0;
     840             : 
     841           0 :         if (adev->dm.dc->fbc_compressor == NULL)
     842             :                 return;
     843             : 
     844           0 :         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
     845             :                 return;
     846             : 
     847           0 :         if (compressor->bo_ptr)
     848             :                 return;
     849             : 
     850             : 
     851           0 :         list_for_each_entry(mode, &connector->modes, head) {
     852           0 :                 if (max_size < mode->htotal * mode->vtotal)
     853           0 :                         max_size = mode->htotal * mode->vtotal;
     854             :         }
     855             : 
     856           0 :         if (max_size) {
     857           0 :                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
     858             :                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
     859           0 :                             &compressor->gpu_addr, &compressor->cpu_addr);
     860             : 
     861           0 :                 if (r)
     862           0 :                         DRM_ERROR("DM: Failed to initialize FBC\n");
     863             :                 else {
     864           0 :                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
     865           0 :                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
     866             :                 }
     867             : 
     868             :         }
     869             : 
     870             : }
     871             : 
     872           0 : static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
     873             :                                           int pipe, bool *enabled,
     874             :                                           unsigned char *buf, int max_bytes)
     875             : {
     876           0 :         struct drm_device *dev = dev_get_drvdata(kdev);
     877           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
     878             :         struct drm_connector *connector;
     879             :         struct drm_connector_list_iter conn_iter;
     880             :         struct amdgpu_dm_connector *aconnector;
     881           0 :         int ret = 0;
     882             : 
     883           0 :         *enabled = false;
     884             : 
     885           0 :         mutex_lock(&adev->dm.audio_lock);
     886             : 
     887           0 :         drm_connector_list_iter_begin(dev, &conn_iter);
     888           0 :         drm_for_each_connector_iter(connector, &conn_iter) {
     889           0 :                 aconnector = to_amdgpu_dm_connector(connector);
     890           0 :                 if (aconnector->audio_inst != port)
     891           0 :                         continue;
     892             : 
     893           0 :                 *enabled = true;
     894           0 :                 ret = drm_eld_size(connector->eld);
     895           0 :                 memcpy(buf, connector->eld, min(max_bytes, ret));
     896             : 
     897           0 :                 break;
     898             :         }
     899           0 :         drm_connector_list_iter_end(&conn_iter);
     900             : 
     901           0 :         mutex_unlock(&adev->dm.audio_lock);
     902             : 
     903           0 :         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
     904             : 
     905           0 :         return ret;
     906             : }
     907             : 
     908             : static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
     909             :         .get_eld = amdgpu_dm_audio_component_get_eld,
     910             : };
     911             : 
     912           0 : static int amdgpu_dm_audio_component_bind(struct device *kdev,
     913             :                                        struct device *hda_kdev, void *data)
     914             : {
     915           0 :         struct drm_device *dev = dev_get_drvdata(kdev);
     916           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
     917           0 :         struct drm_audio_component *acomp = data;
     918             : 
     919           0 :         acomp->ops = &amdgpu_dm_audio_component_ops;
     920           0 :         acomp->dev = kdev;
     921           0 :         adev->dm.audio_component = acomp;
     922             : 
     923           0 :         return 0;
     924             : }
     925             : 
     926           0 : static void amdgpu_dm_audio_component_unbind(struct device *kdev,
     927             :                                           struct device *hda_kdev, void *data)
     928             : {
     929           0 :         struct drm_device *dev = dev_get_drvdata(kdev);
     930           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
     931           0 :         struct drm_audio_component *acomp = data;
     932             : 
     933           0 :         acomp->ops = NULL;
     934           0 :         acomp->dev = NULL;
     935           0 :         adev->dm.audio_component = NULL;
     936           0 : }
     937             : 
     938             : static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
     939             :         .bind   = amdgpu_dm_audio_component_bind,
     940             :         .unbind = amdgpu_dm_audio_component_unbind,
     941             : };
     942             : 
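                      : /*
                      :  * Register DM as a DRM audio component provider: reset the state of
                      :  * every audio pin exposed by the DC resource pool and add the
                      :  * component so the HD-audio driver can bind to it.
                      :  */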
     943           0 : static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
     944             : {
     945             :         int i, ret;
     946             : 
     947           0 :         if (!amdgpu_audio)
     948             :                 return 0;
     949             : 
     950           0 :         adev->mode_info.audio.enabled = true;
     951             : 
     952           0 :         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
     953             : 
     954           0 :         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
     955           0 :                 adev->mode_info.audio.pin[i].channels = -1;
     956           0 :                 adev->mode_info.audio.pin[i].rate = -1;
     957           0 :                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
     958           0 :                 adev->mode_info.audio.pin[i].status_bits = 0;
     959           0 :                 adev->mode_info.audio.pin[i].category_code = 0;
     960           0 :                 adev->mode_info.audio.pin[i].connected = false;
     961           0 :                 adev->mode_info.audio.pin[i].id =
     962           0 :                         adev->dm.dc->res_pool->audios[i]->inst;
     963           0 :                 adev->mode_info.audio.pin[i].offset = 0;
     964             :         }
     965             : 
     966           0 :         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
     967           0 :         if (ret < 0)
     968             :                 return ret;
     969             : 
     970           0 :         adev->dm.audio_registered = true;
     971             : 
     972           0 :         return 0;
     973             : }
     974             : 
     975           0 : static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
     976             : {
     977           0 :         if (!amdgpu_audio)
     978             :                 return;
     979             : 
     980           0 :         if (!adev->mode_info.audio.enabled)
     981             :                 return;
     982             : 
     983           0 :         if (adev->dm.audio_registered) {
     984           0 :                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
     985           0 :                 adev->dm.audio_registered = false;
     986             :         }
     987             : 
     988             :         /* TODO: Disable audio? */
     989             : 
     990           0 :         adev->mode_info.audio.enabled = false;
     991             : }
     992             : 
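                      : /*
                      :  * Notify the bound HD-audio driver that the ELD for @pin has changed,
                      :  * if an audio component with a pin_eld_notify hook is registered.
                      :  */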
      993           0 : static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
     994             : {
     995           0 :         struct drm_audio_component *acomp = adev->dm.audio_component;
     996             : 
     997           0 :         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
     998           0 :                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
     999             : 
    1000           0 :                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
    1001             :                                                  pin, -1);
    1002             :         }
    1003           0 : }
    1004             : 
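                      : /*
                      :  * Bring up the DMUB (Display Microcontroller Unit B) firmware: reset the
                      :  * controller, copy the firmware, BSS/data and VBIOS images into their
                      :  * framebuffer windows, clear the mailbox/tracebuffer/fw-state regions,
                      :  * program the hardware parameters and wait for the firmware to auto-load.
                      :  */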
    1005           0 : static int dm_dmub_hw_init(struct amdgpu_device *adev)
    1006             : {
    1007             :         const struct dmcub_firmware_header_v1_0 *hdr;
    1008           0 :         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
    1009           0 :         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
    1010           0 :         const struct firmware *dmub_fw = adev->dm.dmub_fw;
    1011           0 :         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
    1012           0 :         struct abm *abm = adev->dm.dc->res_pool->abm;
    1013             :         struct dmub_srv_hw_params hw_params;
    1014             :         enum dmub_status status;
    1015             :         const unsigned char *fw_inst_const, *fw_bss_data;
    1016             :         uint32_t i, fw_inst_const_size, fw_bss_data_size;
    1017             :         bool has_hw_support;
    1018             : 
    1019           0 :         if (!dmub_srv)
    1020             :                 /* DMUB isn't supported on the ASIC. */
    1021             :                 return 0;
    1022             : 
    1023           0 :         if (!fb_info) {
    1024           0 :                 DRM_ERROR("No framebuffer info for DMUB service.\n");
    1025           0 :                 return -EINVAL;
    1026             :         }
    1027             : 
    1028           0 :         if (!dmub_fw) {
    1029             :                 /* Firmware required for DMUB support. */
    1030           0 :                 DRM_ERROR("No firmware provided for DMUB.\n");
    1031           0 :                 return -EINVAL;
    1032             :         }
    1033             : 
    1034           0 :         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
    1035           0 :         if (status != DMUB_STATUS_OK) {
    1036           0 :                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
    1037           0 :                 return -EINVAL;
    1038             :         }
    1039             : 
    1040           0 :         if (!has_hw_support) {
    1041           0 :                 DRM_INFO("DMUB unsupported on ASIC\n");
    1042           0 :                 return 0;
    1043             :         }
    1044             : 
    1045             :         /* Reset DMCUB if it was previously running - before we overwrite its memory. */
    1046           0 :         status = dmub_srv_hw_reset(dmub_srv);
    1047           0 :         if (status != DMUB_STATUS_OK)
    1048           0 :                 DRM_WARN("Error resetting DMUB HW: %d\n", status);
    1049             : 
    1050           0 :         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
    1051             : 
    1052           0 :         fw_inst_const = dmub_fw->data +
    1053           0 :                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
    1054             :                         PSP_HEADER_BYTES;
    1055             : 
    1056           0 :         fw_bss_data = dmub_fw->data +
    1057           0 :                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
    1058           0 :                       le32_to_cpu(hdr->inst_const_bytes);
    1059             : 
    1060             :         /* Copy firmware and bios info into FB memory. */
    1061           0 :         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
    1062             :                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
    1063             : 
    1064           0 :         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
    1065             : 
     1066             :         /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
     1067             :          * amdgpu_ucode_init_single_fw will load the dmub firmware's
     1068             :          * fw_inst_const part to cw0; otherwise, the backdoor firmware
     1069             :          * load will be done by dm_dmub_hw_init.
     1070             :          */
    1071           0 :         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
    1072           0 :                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
    1073             :                                 fw_inst_const_size);
    1074             :         }
    1075             : 
    1076           0 :         if (fw_bss_data_size)
    1077           0 :                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
    1078             :                        fw_bss_data, fw_bss_data_size);
    1079             : 
     1080             :         /* Copy the VBIOS image into FB memory. */
    1081           0 :         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
    1082           0 :                adev->bios_size);
    1083             : 
    1084             :         /* Reset regions that need to be reset. */
    1085           0 :         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
     1086           0 :                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
    1087             : 
    1088           0 :         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
    1089           0 :                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
    1090             : 
    1091           0 :         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
    1092           0 :                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
    1093             : 
    1094             :         /* Initialize hardware. */
    1095           0 :         memset(&hw_params, 0, sizeof(hw_params));
    1096           0 :         hw_params.fb_base = adev->gmc.fb_start;
    1097           0 :         hw_params.fb_offset = adev->gmc.aper_base;
    1098             : 
    1099             :         /* backdoor load firmware and trigger dmub running */
    1100           0 :         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
    1101           0 :                 hw_params.load_inst_const = true;
    1102             : 
    1103           0 :         if (dmcu)
    1104           0 :                 hw_params.psp_version = dmcu->psp_version;
    1105             : 
    1106           0 :         for (i = 0; i < fb_info->num_fb; ++i)
    1107           0 :                 hw_params.fb[i] = &fb_info->fb[i];
    1108             : 
    1109           0 :         switch (adev->ip_versions[DCE_HWIP][0]) {
    1110             :         case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
    1111           0 :                 hw_params.dpia_supported = true;
    1112           0 :                 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
    1113           0 :                 break;
    1114             :         default:
    1115             :                 break;
    1116             :         }
    1117             : 
    1118           0 :         status = dmub_srv_hw_init(dmub_srv, &hw_params);
    1119           0 :         if (status != DMUB_STATUS_OK) {
    1120           0 :                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
    1121           0 :                 return -EINVAL;
    1122             :         }
    1123             : 
    1124             :         /* Wait for firmware load to finish. */
    1125           0 :         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
    1126           0 :         if (status != DMUB_STATUS_OK)
    1127           0 :                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
    1128             : 
    1129             :         /* Init DMCU and ABM if available. */
    1130           0 :         if (dmcu && abm) {
    1131           0 :                 dmcu->funcs->dmcu_init(dmcu);
    1132           0 :                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
    1133             :         }
    1134             : 
    1135           0 :         if (!adev->dm.dc->ctx->dmub_srv)
    1136           0 :                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
    1137           0 :         if (!adev->dm.dc->ctx->dmub_srv) {
    1138           0 :                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
    1139           0 :                 return -ENOMEM;
    1140             :         }
    1141             : 
    1142           0 :         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
    1143             :                  adev->dm.dmcub_fw_version);
    1144             : 
    1145           0 :         return 0;
    1146             : }
    1147             : 
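                      : /*
                      :  * Resume path for DMUB: if the hardware is already initialized, just
                      :  * wait for the firmware auto-load to finish; otherwise redo the full
                      :  * dm_dmub_hw_init() sequence.
                      :  */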
    1148           0 : static void dm_dmub_hw_resume(struct amdgpu_device *adev)
    1149             : {
    1150           0 :         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
    1151             :         enum dmub_status status;
    1152             :         bool init;
    1153             : 
    1154           0 :         if (!dmub_srv) {
    1155             :                 /* DMUB isn't supported on the ASIC. */
    1156           0 :                 return;
    1157             :         }
    1158             : 
    1159           0 :         status = dmub_srv_is_hw_init(dmub_srv, &init);
    1160           0 :         if (status != DMUB_STATUS_OK)
    1161           0 :                 DRM_WARN("DMUB hardware init check failed: %d\n", status);
    1162             : 
    1163           0 :         if (status == DMUB_STATUS_OK && init) {
    1164             :                 /* Wait for firmware load to finish. */
    1165           0 :                 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
    1166           0 :                 if (status != DMUB_STATUS_OK)
    1167           0 :                         DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
    1168             :         } else {
    1169             :                 /* Perform the full hardware initialization. */
    1170           0 :                 dm_dmub_hw_init(adev);
    1171             :         }
    1172             : }
    1173             : 
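                      : /*
                      :  * Translate the GMC view of memory (frame buffer, AGP aperture and GART
                      :  * page table addresses) into the dc_phy_addr_space_config that DC uses
                      :  * to program the display address space.
                      :  */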
    1174           0 : static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
    1175             : {
    1176             :         uint64_t pt_base;
    1177             :         uint32_t logical_addr_low;
    1178             :         uint32_t logical_addr_high;
    1179             :         uint32_t agp_base, agp_bot, agp_top;
    1180             :         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
    1181             : 
    1182           0 :         memset(pa_config, 0, sizeof(*pa_config));
    1183             : 
    1184           0 :         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
    1185           0 :         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
    1186             : 
    1187           0 :         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
     1188             :                 /*
     1189             :                  * Raven2 has a HW issue that prevents it from using VRAM
     1190             :                  * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
     1191             :                  * raise the system aperture high address by 1 to avoid
     1192             :                  * the VM fault and hardware hang.
     1193             :                  */
    1194           0 :                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
    1195             :         else
    1196           0 :                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
    1197             : 
    1198           0 :         agp_base = 0;
    1199           0 :         agp_bot = adev->gmc.agp_start >> 24;
    1200           0 :         agp_top = adev->gmc.agp_end >> 24;
    1201             : 
    1202             : 
    1203           0 :         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
    1204           0 :         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
    1205           0 :         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
    1206           0 :         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
    1207           0 :         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
    1208           0 :         page_table_base.low_part = lower_32_bits(pt_base);
    1209             : 
    1210           0 :         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
    1211           0 :         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
    1212             : 
     1213           0 :         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
    1214           0 :         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
    1215           0 :         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
    1216             : 
    1217           0 :         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
    1218           0 :         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
    1219           0 :         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
    1220             : 
    1221           0 :         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
    1222           0 :         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
    1223           0 :         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
    1224             : 
    1225           0 :         pa_config->is_hvm_enabled = 0;
    1226             : 
    1227           0 : }
    1228             : 
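                      : /*
                      :  * Deferred HPD RX IRQ handler: re-detect the sink and, outside of GPU
                      :  * reset, run DP automated-test or link-loss handling for the link that
                      :  * queued this work item.
                      :  */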
    1229           0 : static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
    1230             : {
    1231             :         struct hpd_rx_irq_offload_work *offload_work;
    1232             :         struct amdgpu_dm_connector *aconnector;
    1233             :         struct dc_link *dc_link;
    1234             :         struct amdgpu_device *adev;
    1235           0 :         enum dc_connection_type new_connection_type = dc_connection_none;
    1236             :         unsigned long flags;
    1237             : 
    1238           0 :         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
    1239           0 :         aconnector = offload_work->offload_wq->aconnector;
    1240             : 
    1241           0 :         if (!aconnector) {
    1242           0 :                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
    1243           0 :                 goto skip;
    1244             :         }
    1245             : 
    1246           0 :         adev = drm_to_adev(aconnector->base.dev);
    1247           0 :         dc_link = aconnector->dc_link;
    1248             : 
    1249           0 :         mutex_lock(&aconnector->hpd_lock);
    1250           0 :         if (!dc_link_detect_sink(dc_link, &new_connection_type))
    1251           0 :                 DRM_ERROR("KMS: Failed to detect connector\n");
    1252           0 :         mutex_unlock(&aconnector->hpd_lock);
    1253             : 
    1254           0 :         if (new_connection_type == dc_connection_none)
    1255             :                 goto skip;
    1256             : 
    1257           0 :         if (amdgpu_in_reset(adev))
    1258             :                 goto skip;
    1259             : 
    1260           0 :         mutex_lock(&adev->dm.dc_lock);
    1261           0 :         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
    1262           0 :                 dc_link_dp_handle_automated_test(dc_link);
    1263           0 :         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
    1264           0 :                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
    1265           0 :                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
    1266           0 :                 dc_link_dp_handle_link_loss(dc_link);
    1267           0 :                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
    1268           0 :                 offload_work->offload_wq->is_handling_link_loss = false;
    1269           0 :                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
    1270             :         }
    1271           0 :         mutex_unlock(&adev->dm.dc_lock);
    1272             : 
    1273             : skip:
    1274           0 :         kfree(offload_work);
    1275             : 
    1276           0 : }
    1277             : 
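                      : /*
                      :  * Allocate one single-threaded offload workqueue per DC link so HPD RX
                      :  * interrupt handling can be deferred out of interrupt context.
                      :  */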
    1278           0 : static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
    1279             : {
    1280           0 :         int max_caps = dc->caps.max_links;
    1281           0 :         int i = 0;
    1282           0 :         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
    1283             : 
    1284           0 :         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
    1285             : 
    1286           0 :         if (!hpd_rx_offload_wq)
    1287             :                 return NULL;
    1288             : 
    1289             : 
    1290           0 :         for (i = 0; i < max_caps; i++) {
    1291           0 :                 hpd_rx_offload_wq[i].wq =
    1292           0 :                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
    1293             : 
    1294           0 :                 if (hpd_rx_offload_wq[i].wq == NULL) {
     1295           0 :                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
    1296             :                         return NULL;
    1297             :                 }
    1298             : 
    1299           0 :                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
    1300             :         }
    1301             : 
    1302             :         return hpd_rx_offload_wq;
    1303             : }
    1304             : 
    1305             : struct amdgpu_stutter_quirk {
    1306             :         u16 chip_vendor;
    1307             :         u16 chip_device;
    1308             :         u16 subsys_vendor;
    1309             :         u16 subsys_device;
    1310             :         u8 revision;
    1311             : };
    1312             : 
    1313             : static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
    1314             :         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
    1315             :         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
    1316             :         { 0, 0, 0, 0, 0 },
    1317             : };
    1318             : 
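                      : /*
                      :  * Check the PCI IDs and revision of @pdev against the stutter quirk
                      :  * list above; matching boards need memory stutter mode disabled.
                      :  */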
    1319           0 : static bool dm_should_disable_stutter(struct pci_dev *pdev)
    1320             : {
    1321           0 :         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
    1322             : 
    1323           0 :         while (p && p->chip_device != 0) {
    1324           0 :                 if (pdev->vendor == p->chip_vendor &&
    1325           0 :                     pdev->device == p->chip_device &&
    1326           0 :                     pdev->subsystem_vendor == p->subsys_vendor &&
    1327           0 :                     pdev->subsystem_device == p->subsys_device &&
    1328           0 :                     pdev->revision == p->revision) {
    1329             :                         return true;
    1330             :                 }
    1331           0 :                 ++p;
    1332             :         }
    1333             :         return false;
    1334             : }
    1335             : 
    1336             : static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
    1337             :         {
    1338             :                 .matches = {
    1339             :                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
    1340             :                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
    1341             :                 },
    1342             :         },
    1343             :         {
    1344             :                 .matches = {
    1345             :                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
    1346             :                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
    1347             :                 },
    1348             :         },
    1349             :         {
    1350             :                 .matches = {
    1351             :                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
    1352             :                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
    1353             :                 },
    1354             :         },
    1355             :         {}
    1356             : };
    1357             : 
    1358             : static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
    1359             : {
    1360             :         const struct dmi_system_id *dmi_id;
    1361             : 
    1362           0 :         dm->aux_hpd_discon_quirk = false;
    1363             : 
    1364           0 :         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
    1365             :         if (dmi_id) {
    1366             :                 dm->aux_hpd_discon_quirk = true;
    1367             :                 DRM_INFO("aux_hpd_discon_quirk attached\n");
    1368             :         }
    1369             : }
    1370             : 
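                      : /*
                      :  * Main DM init: create the DC instance from the ASIC/feature data,
                      :  * initialize DMUB, the HPD RX offload workqueues, the freesync and HDCP
                      :  * modules, register DMUB notification callbacks and finally create the
                      :  * DRM/KMS device state.
                      :  */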
    1371           0 : static int amdgpu_dm_init(struct amdgpu_device *adev)
    1372             : {
    1373             :         struct dc_init_data init_data;
    1374             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    1375             :         struct dc_callback_init init_params;
    1376             : #endif
    1377             :         int r;
    1378             : 
    1379           0 :         adev->dm.ddev = adev_to_drm(adev);
    1380           0 :         adev->dm.adev = adev;
    1381             : 
    1382             :         /* Zero all the fields */
    1383           0 :         memset(&init_data, 0, sizeof(init_data));
    1384             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    1385             :         memset(&init_params, 0, sizeof(init_params));
    1386             : #endif
    1387             : 
    1388           0 :         mutex_init(&adev->dm.dc_lock);
    1389           0 :         mutex_init(&adev->dm.audio_lock);
    1390           0 :         spin_lock_init(&adev->dm.vblank_lock);
    1391             : 
     1392           0 :         if (amdgpu_dm_irq_init(adev)) {
    1393           0 :                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
    1394           0 :                 goto error;
    1395             :         }
    1396             : 
    1397           0 :         init_data.asic_id.chip_family = adev->family;
    1398             : 
    1399           0 :         init_data.asic_id.pci_revision_id = adev->pdev->revision;
    1400           0 :         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
    1401           0 :         init_data.asic_id.chip_id = adev->pdev->device;
    1402             : 
    1403           0 :         init_data.asic_id.vram_width = adev->gmc.vram_width;
    1404             :         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
    1405           0 :         init_data.asic_id.atombios_base_address =
    1406           0 :                 adev->mode_info.atom_context->bios;
    1407             : 
    1408           0 :         init_data.driver = adev;
    1409             : 
    1410           0 :         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
    1411             : 
    1412           0 :         if (!adev->dm.cgs_device) {
    1413           0 :                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
    1414           0 :                 goto error;
    1415             :         }
    1416             : 
    1417           0 :         init_data.cgs_device = adev->dm.cgs_device;
    1418             : 
    1419           0 :         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
    1420             : 
    1421           0 :         switch (adev->ip_versions[DCE_HWIP][0]) {
    1422             :         case IP_VERSION(2, 1, 0):
    1423           0 :                 switch (adev->dm.dmcub_fw_version) {
    1424             :                 case 0: /* development */
    1425             :                 case 0x1: /* linux-firmware.git hash 6d9f399 */
    1426             :                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
    1427           0 :                         init_data.flags.disable_dmcu = false;
    1428           0 :                         break;
    1429             :                 default:
    1430           0 :                         init_data.flags.disable_dmcu = true;
    1431             :                 }
    1432             :                 break;
    1433             :         case IP_VERSION(2, 0, 3):
    1434           0 :                 init_data.flags.disable_dmcu = true;
    1435           0 :                 break;
    1436             :         default:
    1437             :                 break;
    1438             :         }
    1439             : 
    1440           0 :         switch (adev->asic_type) {
    1441             :         case CHIP_CARRIZO:
    1442             :         case CHIP_STONEY:
    1443           0 :                 init_data.flags.gpu_vm_support = true;
    1444           0 :                 break;
    1445             :         default:
    1446           0 :                 switch (adev->ip_versions[DCE_HWIP][0]) {
    1447             :                 case IP_VERSION(1, 0, 0):
    1448             :                 case IP_VERSION(1, 0, 1):
    1449             :                         /* enable S/G on PCO and RV2 */
    1450           0 :                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
    1451             :                             (adev->apu_flags & AMD_APU_IS_PICASSO))
    1452           0 :                                 init_data.flags.gpu_vm_support = true;
    1453             :                         break;
    1454             :                 case IP_VERSION(2, 1, 0):
    1455             :                 case IP_VERSION(3, 0, 1):
    1456             :                 case IP_VERSION(3, 1, 2):
    1457             :                 case IP_VERSION(3, 1, 3):
    1458             :                 case IP_VERSION(3, 1, 5):
    1459             :                 case IP_VERSION(3, 1, 6):
    1460           0 :                         init_data.flags.gpu_vm_support = true;
    1461           0 :                         break;
    1462             :                 default:
    1463             :                         break;
    1464             :                 }
    1465             :                 break;
    1466             :         }
    1467             : 
    1468           0 :         if (init_data.flags.gpu_vm_support)
    1469           0 :                 adev->mode_info.gpu_vm_support = true;
    1470             : 
    1471           0 :         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
    1472           0 :                 init_data.flags.fbc_support = true;
    1473             : 
    1474           0 :         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
    1475           0 :                 init_data.flags.multi_mon_pp_mclk_switch = true;
    1476             : 
    1477           0 :         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
    1478           0 :                 init_data.flags.disable_fractional_pwm = true;
    1479             : 
    1480           0 :         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
    1481           0 :                 init_data.flags.edp_no_power_sequencing = true;
    1482             : 
    1483           0 :         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
    1484           0 :                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
    1485           0 :         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
    1486           0 :                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
    1487             : 
    1488           0 :         init_data.flags.seamless_boot_edp_requested = false;
    1489             : 
    1490           0 :         if (check_seamless_boot_capability(adev)) {
    1491           0 :                 init_data.flags.seamless_boot_edp_requested = true;
    1492           0 :                 init_data.flags.allow_seamless_boot_optimization = true;
    1493           0 :                 DRM_INFO("Seamless boot condition check passed\n");
    1494             :         }
    1495             : 
    1496           0 :         init_data.flags.enable_mipi_converter_optimization = true;
    1497             : 
    1498           0 :         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
    1499           0 :         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
    1500             : 
    1501           0 :         INIT_LIST_HEAD(&adev->dm.da_list);
    1502             : 
    1503           0 :         retrieve_dmi_info(&adev->dm);
    1504             : 
    1505             :         /* Display Core create. */
    1506           0 :         adev->dm.dc = dc_create(&init_data);
    1507             : 
    1508           0 :         if (adev->dm.dc) {
    1509           0 :                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
    1510             :         } else {
    1511           0 :                 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
    1512           0 :                 goto error;
    1513             :         }
    1514             : 
    1515           0 :         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
    1516           0 :                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
    1517           0 :                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
    1518             :         }
    1519             : 
    1520           0 :         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
    1521           0 :                 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
    1522           0 :         if (dm_should_disable_stutter(adev->pdev))
    1523           0 :                 adev->dm.dc->debug.disable_stutter = true;
    1524             : 
    1525           0 :         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
    1526           0 :                 adev->dm.dc->debug.disable_stutter = true;
    1527             : 
    1528           0 :         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
    1529           0 :                 adev->dm.dc->debug.disable_dsc = true;
    1530             :         }
    1531             : 
    1532           0 :         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
    1533           0 :                 adev->dm.dc->debug.disable_clock_gate = true;
    1534             : 
    1535           0 :         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
    1536           0 :                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
    1537             : 
    1538           0 :         adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
    1539             : 
    1540           0 :         r = dm_dmub_hw_init(adev);
    1541           0 :         if (r) {
    1542           0 :                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
    1543           0 :                 goto error;
    1544             :         }
    1545             : 
    1546           0 :         dc_hardware_init(adev->dm.dc);
    1547             : 
    1548           0 :         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
    1549           0 :         if (!adev->dm.hpd_rx_offload_wq) {
    1550           0 :                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
    1551           0 :                 goto error;
    1552             :         }
    1553             : 
    1554           0 :         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
    1555             :                 struct dc_phy_addr_space_config pa_config;
    1556             : 
    1557           0 :                 mmhub_read_system_context(adev, &pa_config);
    1558             : 
    1559             :                 // Call the DC init_memory func
    1560           0 :                 dc_setup_system_context(adev->dm.dc, &pa_config);
    1561             :         }
    1562             : 
    1563           0 :         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
    1564           0 :         if (!adev->dm.freesync_module) {
    1565           0 :                 DRM_ERROR(
    1566             :                 "amdgpu: failed to initialize freesync_module.\n");
    1567             :         } else
    1568           0 :                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
    1569             :                                 adev->dm.freesync_module);
    1570             : 
    1571           0 :         amdgpu_dm_init_color_mod();
    1572             : 
    1573           0 :         if (adev->dm.dc->caps.max_links > 0) {
    1574           0 :                 adev->dm.vblank_control_workqueue =
    1575           0 :                         create_singlethread_workqueue("dm_vblank_control_workqueue");
    1576           0 :                 if (!adev->dm.vblank_control_workqueue)
    1577           0 :                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
    1578             :         }
    1579             : 
    1580             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    1581             :         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
    1582             :                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
    1583             : 
    1584             :                 if (!adev->dm.hdcp_workqueue)
    1585             :                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
    1586             :                 else
    1587             :                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
    1588             : 
    1589             :                 dc_init_callbacks(adev->dm.dc, &init_params);
    1590             :         }
    1591             : #endif
    1592             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    1593             :         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
    1594             : #endif
    1595           0 :         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
    1596           0 :                 init_completion(&adev->dm.dmub_aux_transfer_done);
    1597           0 :                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
    1598           0 :                 if (!adev->dm.dmub_notify) {
     1599           0 :                         DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
    1600           0 :                         goto error;
    1601             :                 }
    1602             : 
    1603           0 :                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
    1604           0 :                 if (!adev->dm.delayed_hpd_wq) {
    1605           0 :                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
    1606           0 :                         goto error;
    1607             :                 }
    1608             : 
    1609           0 :                 amdgpu_dm_outbox_init(adev);
    1610           0 :                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
    1611             :                         dmub_aux_setconfig_callback, false)) {
     1612             :                         DRM_ERROR("amdgpu: failed to register dmub aux callback");
    1613             :                         goto error;
    1614             :                 }
    1615           0 :                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
     1616             :                         DRM_ERROR("amdgpu: failed to register dmub hpd callback");
    1617             :                         goto error;
    1618             :                 }
    1619           0 :                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
     1620             :                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
    1621             :                         goto error;
    1622             :                 }
    1623             :         }
    1624             : 
    1625           0 :         if (amdgpu_dm_initialize_drm_device(adev)) {
    1626           0 :                 DRM_ERROR(
    1627             :                 "amdgpu: failed to initialize sw for display support.\n");
    1628           0 :                 goto error;
    1629             :         }
    1630             : 
    1631             :         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
    1632             :          * It is expected that DMUB will resend any pending notifications at this point, for
    1633             :          * example HPD from DPIA.
    1634             :          */
    1635           0 :         if (dc_is_dmub_outbox_supported(adev->dm.dc))
    1636           0 :                 dc_enable_dmub_outbox(adev->dm.dc);
    1637             : 
    1638             :         /* create fake encoders for MST */
    1639           0 :         dm_dp_create_fake_mst_encoders(adev);
    1640             : 
    1641             :         /* TODO: Add_display_info? */
    1642             : 
    1643             :         /* TODO use dynamic cursor width */
    1644           0 :         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
    1645           0 :         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
    1646             : 
    1647           0 :         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
    1648           0 :                 DRM_ERROR(
    1649             :                 "amdgpu: failed to initialize sw for display support.\n");
    1650           0 :                 goto error;
    1651             :         }
    1652             : 
    1653             : 
    1654           0 :         DRM_DEBUG_DRIVER("KMS initialized.\n");
    1655             : 
    1656           0 :         return 0;
    1657             : error:
    1658           0 :         amdgpu_dm_fini(adev);
    1659             : 
    1660           0 :         return -EINVAL;
    1661             : }
    1662             : 
    1663           0 : static int amdgpu_dm_early_fini(void *handle)
    1664             : {
    1665           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    1666             : 
    1667           0 :         amdgpu_dm_audio_fini(adev);
    1668             : 
    1669           0 :         return 0;
    1670             : }
    1671             : 
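                      : /*
                      :  * Tear down everything amdgpu_dm_init() created, in roughly reverse
                      :  * order: workqueues, DRM device state, DMUB resources, DC itself and
                      :  * the remaining locks.
                      :  */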
    1672           0 : static void amdgpu_dm_fini(struct amdgpu_device *adev)
    1673             : {
    1674             :         int i;
    1675             : 
    1676           0 :         if (adev->dm.vblank_control_workqueue) {
    1677           0 :                 destroy_workqueue(adev->dm.vblank_control_workqueue);
    1678           0 :                 adev->dm.vblank_control_workqueue = NULL;
    1679             :         }
    1680             : 
    1681           0 :         for (i = 0; i < adev->dm.display_indexes_num; i++) {
    1682           0 :                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
    1683             :         }
    1684             : 
    1685           0 :         amdgpu_dm_destroy_drm_device(&adev->dm);
    1686             : 
    1687             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    1688             :         if (adev->dm.crc_rd_wrk) {
    1689             :                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
    1690             :                 kfree(adev->dm.crc_rd_wrk);
    1691             :                 adev->dm.crc_rd_wrk = NULL;
    1692             :         }
    1693             : #endif
    1694             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    1695             :         if (adev->dm.hdcp_workqueue) {
    1696             :                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
    1697             :                 adev->dm.hdcp_workqueue = NULL;
    1698             :         }
    1699             : 
    1700             :         if (adev->dm.dc)
    1701             :                 dc_deinit_callbacks(adev->dm.dc);
    1702             : #endif
    1703             : 
    1704           0 :         dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
    1705             : 
    1706           0 :         if (dc_enable_dmub_notifications(adev->dm.dc)) {
    1707           0 :                 kfree(adev->dm.dmub_notify);
    1708           0 :                 adev->dm.dmub_notify = NULL;
    1709           0 :                 destroy_workqueue(adev->dm.delayed_hpd_wq);
    1710           0 :                 adev->dm.delayed_hpd_wq = NULL;
    1711             :         }
    1712             : 
    1713           0 :         if (adev->dm.dmub_bo)
    1714           0 :                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
    1715             :                                       &adev->dm.dmub_bo_gpu_addr,
    1716             :                                       &adev->dm.dmub_bo_cpu_addr);
    1717             : 
    1718           0 :         if (adev->dm.hpd_rx_offload_wq) {
    1719           0 :                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
    1720           0 :                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
    1721           0 :                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
    1722           0 :                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
    1723             :                         }
    1724             :                 }
    1725             : 
    1726           0 :                 kfree(adev->dm.hpd_rx_offload_wq);
    1727           0 :                 adev->dm.hpd_rx_offload_wq = NULL;
    1728             :         }
    1729             : 
    1730             :         /* DC Destroy TODO: Replace destroy DAL */
    1731           0 :         if (adev->dm.dc)
    1732           0 :                 dc_destroy(&adev->dm.dc);
    1733             :         /*
     1734             :          * TODO: pageflip, vblank interrupt
    1735             :          *
    1736             :          * amdgpu_dm_irq_fini(adev);
    1737             :          */
    1738             : 
    1739           0 :         if (adev->dm.cgs_device) {
    1740           0 :                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
    1741           0 :                 adev->dm.cgs_device = NULL;
    1742             :         }
    1743           0 :         if (adev->dm.freesync_module) {
    1744           0 :                 mod_freesync_destroy(adev->dm.freesync_module);
    1745           0 :                 adev->dm.freesync_module = NULL;
    1746             :         }
    1747             : 
    1748           0 :         mutex_destroy(&adev->dm.audio_lock);
    1749           0 :         mutex_destroy(&adev->dm.dc_lock);
    1750             : 
    1751           0 :         return;
    1752             : }
    1753             : 
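                      : /*
                      :  * Request and validate DMCU firmware where it is required (Raven
                      :  * variants and Navi12) and register it with the PSP firmware loader;
                      :  * all other ASICs simply return 0.
                      :  */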
    1754           0 : static int load_dmcu_fw(struct amdgpu_device *adev)
    1755             : {
    1756           0 :         const char *fw_name_dmcu = NULL;
    1757             :         int r;
    1758             :         const struct dmcu_firmware_header_v1_0 *hdr;
    1759             : 
     1760           0 :         switch (adev->asic_type) {
    1761             : #if defined(CONFIG_DRM_AMD_DC_SI)
    1762             :         case CHIP_TAHITI:
    1763             :         case CHIP_PITCAIRN:
    1764             :         case CHIP_VERDE:
    1765             :         case CHIP_OLAND:
    1766             : #endif
    1767             :         case CHIP_BONAIRE:
    1768             :         case CHIP_HAWAII:
    1769             :         case CHIP_KAVERI:
    1770             :         case CHIP_KABINI:
    1771             :         case CHIP_MULLINS:
    1772             :         case CHIP_TONGA:
    1773             :         case CHIP_FIJI:
    1774             :         case CHIP_CARRIZO:
    1775             :         case CHIP_STONEY:
    1776             :         case CHIP_POLARIS11:
    1777             :         case CHIP_POLARIS10:
    1778             :         case CHIP_POLARIS12:
    1779             :         case CHIP_VEGAM:
    1780             :         case CHIP_VEGA10:
    1781             :         case CHIP_VEGA12:
    1782             :         case CHIP_VEGA20:
    1783             :                 return 0;
    1784             :         case CHIP_NAVI12:
    1785             :                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
    1786             :                 break;
    1787             :         case CHIP_RAVEN:
    1788           0 :                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
    1789             :                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
    1790           0 :                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
    1791             :                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
    1792             :                 else
    1793             :                         return 0;
    1794             :                 break;
    1795             :         default:
    1796           0 :                 switch (adev->ip_versions[DCE_HWIP][0]) {
    1797             :                 case IP_VERSION(2, 0, 2):
    1798             :                 case IP_VERSION(2, 0, 3):
    1799             :                 case IP_VERSION(2, 0, 0):
    1800             :                 case IP_VERSION(2, 1, 0):
    1801             :                 case IP_VERSION(3, 0, 0):
    1802             :                 case IP_VERSION(3, 0, 2):
    1803             :                 case IP_VERSION(3, 0, 3):
    1804             :                 case IP_VERSION(3, 0, 1):
    1805             :                 case IP_VERSION(3, 1, 2):
    1806             :                 case IP_VERSION(3, 1, 3):
    1807             :                 case IP_VERSION(3, 1, 4):
    1808             :                 case IP_VERSION(3, 1, 5):
    1809             :                 case IP_VERSION(3, 1, 6):
    1810             :                 case IP_VERSION(3, 2, 0):
    1811             :                 case IP_VERSION(3, 2, 1):
    1812             :                         return 0;
    1813             :                 default:
    1814             :                         break;
    1815             :                 }
    1816           0 :                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
    1817           0 :                 return -EINVAL;
    1818             :         }
    1819             : 
    1820           0 :         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
    1821           0 :                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
    1822           0 :                 return 0;
    1823             :         }
    1824             : 
    1825           0 :         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
    1826           0 :         if (r == -ENOENT) {
    1827             :                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
    1828           0 :                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
    1829           0 :                 adev->dm.fw_dmcu = NULL;
    1830           0 :                 return 0;
    1831             :         }
    1832           0 :         if (r) {
    1833           0 :                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
    1834             :                         fw_name_dmcu);
    1835           0 :                 return r;
    1836             :         }
    1837             : 
    1838           0 :         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
    1839           0 :         if (r) {
    1840           0 :                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
    1841             :                         fw_name_dmcu);
    1842           0 :                 release_firmware(adev->dm.fw_dmcu);
    1843           0 :                 adev->dm.fw_dmcu = NULL;
    1844           0 :                 return r;
    1845             :         }
    1846             : 
    1847           0 :         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
    1848           0 :         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
    1849           0 :         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
    1850           0 :         adev->firmware.fw_size +=
    1851           0 :                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
    1852             : 
    1853           0 :         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
    1854           0 :         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
    1855           0 :         adev->firmware.fw_size +=
    1856           0 :                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
    1857             : 
    1858           0 :         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
    1859             : 
    1860           0 :         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
    1861             : 
    1862           0 :         return 0;
    1863             : }
    1864             : 
    1865           0 : static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
    1866             : {
    1867           0 :         struct amdgpu_device *adev = ctx;
    1868             : 
    1869           0 :         return dm_read_reg(adev->dm.dc->ctx, address);
    1870             : }
    1871             : 
    1872           0 : static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
    1873             :                                      uint32_t value)
    1874             : {
    1875           0 :         struct amdgpu_device *adev = ctx;
    1876             : 
     1877           0 :         dm_write_reg(adev->dm.dc->ctx, address, value);
    1878             : }
    1879             : 
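                      : /*
                      :  * Software-side DMUB setup: select the DMUB ASIC enum and firmware
                      :  * file name for the detected DCE/DCN IP version before the DMUB
                      :  * service and its framebuffer regions are created.
                      :  */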
    1880           0 : static int dm_dmub_sw_init(struct amdgpu_device *adev)
    1881             : {
    1882             :         struct dmub_srv_create_params create_params;
    1883             :         struct dmub_srv_region_params region_params;
    1884             :         struct dmub_srv_region_info region_info;
    1885             :         struct dmub_srv_fb_params fb_params;
    1886             :         struct dmub_srv_fb_info *fb_info;
    1887             :         struct dmub_srv *dmub_srv;
    1888             :         const struct dmcub_firmware_header_v1_0 *hdr;
    1889             :         const char *fw_name_dmub;
    1890             :         enum dmub_asic dmub_asic;
    1891             :         enum dmub_status status;
    1892             :         int r;
    1893             : 
    1894           0 :         switch (adev->ip_versions[DCE_HWIP][0]) {
    1895             :         case IP_VERSION(2, 1, 0):
    1896           0 :                 dmub_asic = DMUB_ASIC_DCN21;
    1897           0 :                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
    1898           0 :                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
    1899           0 :                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
    1900             :                 break;
    1901             :         case IP_VERSION(3, 0, 0):
    1902           0 :                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
    1903             :                         dmub_asic = DMUB_ASIC_DCN30;
    1904             :                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
    1905             :                 } else {
    1906           0 :                         dmub_asic = DMUB_ASIC_DCN30;
    1907           0 :                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
    1908             :                 }
    1909             :                 break;
    1910             :         case IP_VERSION(3, 0, 1):
    1911             :                 dmub_asic = DMUB_ASIC_DCN301;
    1912             :                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
    1913             :                 break;
    1914             :         case IP_VERSION(3, 0, 2):
    1915           0 :                 dmub_asic = DMUB_ASIC_DCN302;
    1916           0 :                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
    1917           0 :                 break;
    1918             :         case IP_VERSION(3, 0, 3):
    1919           0 :                 dmub_asic = DMUB_ASIC_DCN303;
    1920           0 :                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
    1921           0 :                 break;
    1922             :         case IP_VERSION(3, 1, 2):
    1923             :         case IP_VERSION(3, 1, 3):
    1924           0 :                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
    1925             :                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
    1926             :                 break;
    1927             :         case IP_VERSION(3, 1, 4):
    1928           0 :                 dmub_asic = DMUB_ASIC_DCN314;
    1929           0 :                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
    1930           0 :                 break;
    1931             :         case IP_VERSION(3, 1, 5):
    1932           0 :                 dmub_asic = DMUB_ASIC_DCN315;
    1933           0 :                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
    1934           0 :                 break;
    1935             :         case IP_VERSION(3, 1, 6):
    1936           0 :                 dmub_asic = DMUB_ASIC_DCN316;
    1937           0 :                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
    1938           0 :                 break;
    1939             :         case IP_VERSION(3, 2, 0):
    1940           0 :                 dmub_asic = DMUB_ASIC_DCN32;
    1941           0 :                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
    1942           0 :                 break;
    1943             :         case IP_VERSION(3, 2, 1):
    1944           0 :                 dmub_asic = DMUB_ASIC_DCN321;
    1945           0 :                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
    1946           0 :                 break;
    1947             :         default:
    1948             :                 /* ASIC doesn't support DMUB. */
    1949             :                 return 0;
    1950             :         }
    1951             : 
    1952           0 :         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
    1953           0 :         if (r) {
    1954           0 :                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
    1955           0 :                 return 0;
    1956             :         }
    1957             : 
    1958           0 :         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
    1959           0 :         if (r) {
    1960           0 :                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
    1961           0 :                 return 0;
    1962             :         }
    1963             : 
    1964           0 :         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
    1965           0 :         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
    1966             : 
    1967           0 :         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
    1968           0 :                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
    1969             :                         AMDGPU_UCODE_ID_DMCUB;
    1970           0 :                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
    1971             :                         adev->dm.dmub_fw;
    1972           0 :                 adev->firmware.fw_size +=
    1973           0 :                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
    1974             : 
    1975           0 :                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
    1976             :                          adev->dm.dmcub_fw_version);
    1977             :         }
    1978             : 
    1979             : 
    1980           0 :         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
    1981           0 :         dmub_srv = adev->dm.dmub_srv;
    1982             : 
    1983           0 :         if (!dmub_srv) {
    1984           0 :                 DRM_ERROR("Failed to allocate DMUB service!\n");
    1985           0 :                 return -ENOMEM;
    1986             :         }
    1987             : 
    1988           0 :         memset(&create_params, 0, sizeof(create_params));
    1989           0 :         create_params.user_ctx = adev;
    1990           0 :         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
    1991           0 :         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
    1992           0 :         create_params.asic = dmub_asic;
    1993             : 
    1994             :         /* Create the DMUB service. */
    1995           0 :         status = dmub_srv_create(dmub_srv, &create_params);
    1996           0 :         if (status != DMUB_STATUS_OK) {
    1997           0 :                 DRM_ERROR("Error creating DMUB service: %d\n", status);
    1998           0 :                 return -EINVAL;
    1999             :         }
    2000             : 
    2001             :         /* Calculate the size of all the regions for the DMUB service. */
    2002           0 :         memset(&region_params, 0, sizeof(region_params));
    2003             : 
    2004           0 :         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
    2005           0 :                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
    2006           0 :         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
    2007           0 :         region_params.vbios_size = adev->bios_size;
    2008           0 :         region_params.fw_bss_data = region_params.bss_data_size ?
    2009           0 :                 adev->dm.dmub_fw->data +
    2010           0 :                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
    2011           0 :                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
    2012           0 :         region_params.fw_inst_const =
    2013           0 :                 adev->dm.dmub_fw->data +
    2014           0 :                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
    2015             :                 PSP_HEADER_BYTES;
    2016             : 
    2017           0 :         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
    2018             :                                            &region_info);
    2019             : 
    2020           0 :         if (status != DMUB_STATUS_OK) {
    2021           0 :                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
    2022           0 :                 return -EINVAL;
    2023             :         }
    2024             : 
    2025             :         /*
    2026             :          * Allocate a framebuffer based on the total size of all the regions.
    2027             :          * TODO: Move this into GART.
    2028             :          */
    2029           0 :         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
    2030             :                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
    2031             :                                     &adev->dm.dmub_bo_gpu_addr,
    2032             :                                     &adev->dm.dmub_bo_cpu_addr);
    2033           0 :         if (r)
    2034             :                 return r;
    2035             : 
    2036             :         /* Rebase the regions on the framebuffer address. */
    2037           0 :         memset(&fb_params, 0, sizeof(fb_params));
    2038           0 :         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
    2039           0 :         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
    2040           0 :         fb_params.region_info = &region_info;
    2041             : 
    2042           0 :         adev->dm.dmub_fb_info =
    2043           0 :                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
    2044           0 :         fb_info = adev->dm.dmub_fb_info;
    2045             : 
    2046           0 :         if (!fb_info) {
    2047           0 :                 DRM_ERROR(
    2048             :                         "Failed to allocate framebuffer info for DMUB service!\n");
    2049           0 :                 return -ENOMEM;
    2050             :         }
    2051             : 
    2052           0 :         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
    2053           0 :         if (status != DMUB_STATUS_OK) {
    2054           0 :                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
    2055           0 :                 return -EINVAL;
    2056             :         }
    2057             : 
    2058             :         return 0;
    2059             : }
    2060             : 
    2061           0 : static int dm_sw_init(void *handle)
    2062             : {
    2063           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    2064             :         int r;
    2065             : 
    2066           0 :         r = dm_dmub_sw_init(adev);
    2067           0 :         if (r)
    2068             :                 return r;
    2069             : 
    2070           0 :         return load_dmcu_fw(adev);
    2071             : }
    2072             : 
    2073           0 : static int dm_sw_fini(void *handle)
    2074             : {
    2075           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    2076             : 
    2077           0 :         kfree(adev->dm.dmub_fb_info);
    2078           0 :         adev->dm.dmub_fb_info = NULL;
    2079             : 
    2080           0 :         if (adev->dm.dmub_srv) {
    2081           0 :                 dmub_srv_destroy(adev->dm.dmub_srv);
    2082           0 :                 adev->dm.dmub_srv = NULL;
    2083             :         }
    2084             : 
    2085           0 :         release_firmware(adev->dm.dmub_fw);
    2086           0 :         adev->dm.dmub_fw = NULL;
    2087             : 
    2088           0 :         release_firmware(adev->dm.fw_dmcu);
    2089           0 :         adev->dm.fw_dmcu = NULL;
    2090             : 
    2091           0 :         return 0;
    2092             : }
    2093             : 
    2094           0 : static int detect_mst_link_for_all_connectors(struct drm_device *dev)
    2095             : {
    2096             :         struct amdgpu_dm_connector *aconnector;
    2097             :         struct drm_connector *connector;
    2098             :         struct drm_connector_list_iter iter;
    2099           0 :         int ret = 0;
    2100             : 
    2101           0 :         drm_connector_list_iter_begin(dev, &iter);
    2102           0 :         drm_for_each_connector_iter(connector, &iter) {
    2103           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    2104           0 :                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
    2105           0 :                     aconnector->mst_mgr.aux) {
    2106           0 :                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
    2107             :                                          aconnector,
    2108             :                                          aconnector->base.base.id);
    2109             : 
    2110           0 :                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
    2111           0 :                         if (ret < 0) {
    2112           0 :                                 DRM_ERROR("DM_MST: Failed to start MST\n");
    2113           0 :                                 aconnector->dc_link->type =
    2114             :                                         dc_connection_single;
    2115           0 :                                 break;
    2116             :                         }
    2117             :                 }
    2118             :         }
    2119           0 :         drm_connector_list_iter_end(&iter);
    2120             : 
    2121           0 :         return ret;
    2122             : }
    2123             : 
    2124           0 : static int dm_late_init(void *handle)
    2125             : {
    2126           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    2127             : 
    2128             :         struct dmcu_iram_parameters params;
    2129             :         unsigned int linear_lut[16];
    2130             :         int i;
    2131           0 :         struct dmcu *dmcu = NULL;
    2132             : 
    2133           0 :         dmcu = adev->dm.dc->res_pool->dmcu;
    2134             : 
    2135           0 :         for (i = 0; i < 16; i++)
    2136           0 :                 linear_lut[i] = 0xFFFF * i / 15;
    2137             : 
    2138           0 :         params.set = 0;
    2139           0 :         params.backlight_ramping_override = false;
    2140           0 :         params.backlight_ramping_start = 0xCCCC;
    2141           0 :         params.backlight_ramping_reduction = 0xCCCCCCCC;
    2142           0 :         params.backlight_lut_array_size = 16;
    2143           0 :         params.backlight_lut_array = linear_lut;
    2144             : 
     2145             :         /* Min backlight level after ABM reduction; don't allow below 1%:
     2146             :          * 0xFFFF x 0.01 = 0x28F (see the worked numbers after this function)
     2147             :          */
    2148           0 :         params.min_abm_backlight = 0x28F;
     2149             :         /* In the case where ABM is implemented on dmcub,
     2150             :          * the dmcu object will be NULL.
     2151             :          * ABM 2.4 and up are implemented on dmcub.
     2152             :          */
    2153           0 :         if (dmcu) {
    2154           0 :                 if (!dmcu_load_iram(dmcu, params))
    2155             :                         return -EINVAL;
    2156           0 :         } else if (adev->dm.dc->ctx->dmub_srv) {
    2157             :                 struct dc_link *edp_links[MAX_NUM_EDP];
    2158             :                 int edp_num;
    2159             : 
    2160           0 :                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
    2161           0 :                 for (i = 0; i < edp_num; i++) {
    2162           0 :                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
    2163           0 :                                 return -EINVAL;
    2164             :                 }
    2165             :         }
    2166             : 
    2167           0 :         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
    2168             : }
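
For reference, the backlight constants programmed above work out as follows. This is a hedged, standalone sanity-check sketch in ordinary userspace C, not driver code, and nothing in it ships with the driver:

#include <assert.h>

int main(void)
{
	unsigned int linear_lut[16];
	int i;

	/* Same formula as dm_late_init(): 16 evenly spaced LUT entries. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	assert(linear_lut[0] == 0x0000);
	assert(linear_lut[1] == 0x1111);   /* 65535 / 15 = 4369 */
	assert(linear_lut[15] == 0xFFFF);

	/* 1% ABM floor: 0xFFFF * 0.01 = 655.35, truncated to 655 = 0x28F. */
	assert((unsigned int)(0xFFFF * 0.01) == 0x28F);
	return 0;
}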
    2169             : 
    2170           0 : static void s3_handle_mst(struct drm_device *dev, bool suspend)
    2171             : {
    2172             :         struct amdgpu_dm_connector *aconnector;
    2173             :         struct drm_connector *connector;
    2174             :         struct drm_connector_list_iter iter;
    2175             :         struct drm_dp_mst_topology_mgr *mgr;
    2176             :         int ret;
    2177           0 :         bool need_hotplug = false;
    2178             : 
    2179           0 :         drm_connector_list_iter_begin(dev, &iter);
    2180           0 :         drm_for_each_connector_iter(connector, &iter) {
    2181           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    2182           0 :                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
    2183           0 :                     aconnector->mst_port)
    2184           0 :                         continue;
    2185             : 
    2186           0 :                 mgr = &aconnector->mst_mgr;
    2187             : 
    2188           0 :                 if (suspend) {
    2189           0 :                         drm_dp_mst_topology_mgr_suspend(mgr);
    2190             :                 } else {
    2191           0 :                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
    2192           0 :                         if (ret < 0) {
    2193           0 :                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
    2194             :                                         aconnector->dc_link);
    2195           0 :                                 need_hotplug = true;
    2196             :                         }
    2197             :                 }
    2198             :         }
    2199           0 :         drm_connector_list_iter_end(&iter);
    2200             : 
    2201           0 :         if (need_hotplug)
    2202           0 :                 drm_kms_helper_hotplug_event(dev);
    2203           0 : }
    2204             : 
    2205           0 : static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
    2206             : {
    2207           0 :         int ret = 0;
    2208             : 
     2209             :         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
     2210             :          * depends on the Windows driver dc implementation.
     2211             :          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
     2212             :          * should be passed to smu during boot up and resume from s3.
     2213             :          * boot up: dc calculates dcn watermark clock settings within dc_create,
     2214             :          * dcn20_resource_construct
     2215             :          * then calls pplib functions below to pass the settings to smu:
     2216             :          * smu_set_watermarks_for_clock_ranges
     2217             :          * smu_set_watermarks_table
     2218             :          * navi10_set_watermarks_table
     2219             :          * smu_write_watermarks_table
     2220             :          *
     2221             :          * For Renoir, clock settings of dcn watermarks are also fixed values.
     2222             :          * dc has implemented a different flow for the Windows driver:
     2223             :          * dc_hardware_init / dc_set_power_state
     2224             :          * dcn10_init_hw
     2225             :          * notify_wm_ranges
     2226             :          * set_wm_ranges
     2227             :          * -- Linux
     2228             :          * smu_set_watermarks_for_clock_ranges
     2229             :          * renoir_set_watermarks_table
     2230             :          * smu_write_watermarks_table
     2231             :          *
     2232             :          * For Linux,
     2233             :          * dc_hardware_init -> amdgpu_dm_init
     2234             :          * dc_set_power_state -> dm_resume
     2235             :          *
     2236             :          * Therefore, this function applies to Navi10/12/14 but not to Renoir
     2237             :          * (a minimal sketch of this version gate follows the function below).
     2238             :          */
    2239           0 :         switch (adev->ip_versions[DCE_HWIP][0]) {
    2240             :         case IP_VERSION(2, 0, 2):
    2241             :         case IP_VERSION(2, 0, 0):
    2242             :                 break;
    2243             :         default:
    2244             :                 return 0;
    2245             :         }
    2246             : 
    2247           0 :         ret = amdgpu_dpm_write_watermarks_table(adev);
    2248           0 :         if (ret) {
    2249           0 :                 DRM_ERROR("Failed to update WMTABLE!\n");
    2250           0 :                 return ret;
    2251             :         }
    2252             : 
    2253             :         return 0;
    2254             : }
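
As a minimal sketch of the ASIC gate described in the comment above, assuming the same headers and types as this file. The helper name dm_needs_watermarks_rewrite() is hypothetical; it only restates the switch in amdgpu_dm_smu_write_watermarks_table():

/* Hedged sketch: true only for Navi1x dGPUs (DCN 2.0.x), whose fixed
 * watermark clock settings must be handed to the SMU again after S3. */
static bool dm_needs_watermarks_rewrite(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 0):
	case IP_VERSION(2, 0, 2):
		return true;
	default:
		/* e.g. Renoir passes watermarks via notify_wm_ranges/set_wm_ranges */
		return false;
	}
}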
    2255             : 
    2256             : /**
    2257             :  * dm_hw_init() - Initialize DC device
    2258             :  * @handle: The base driver device containing the amdgpu_dm device.
    2259             :  *
    2260             :  * Initialize the &struct amdgpu_display_manager device. This involves calling
    2261             :  * the initializers of each DM component, then populating the struct with them.
    2262             :  *
    2263             :  * Although the function implies hardware initialization, both hardware and
    2264             :  * software are initialized here. Splitting them out to their relevant init
    2265             :  * hooks is a future TODO item.
    2266             :  *
    2267             :  * Some notable things that are initialized here:
    2268             :  *
    2269             :  * - Display Core, both software and hardware
    2270             :  * - DC modules that we need (freesync and color management)
    2271             :  * - DRM software states
    2272             :  * - Interrupt sources and handlers
    2273             :  * - Vblank support
    2274             :  * - Debug FS entries, if enabled
    2275             :  */
    2276           0 : static int dm_hw_init(void *handle)
    2277             : {
    2278           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    2279             :         /* Create DAL display manager */
    2280           0 :         amdgpu_dm_init(adev);
    2281           0 :         amdgpu_dm_hpd_init(adev);
    2282             : 
    2283           0 :         return 0;
    2284             : }
    2285             : 
    2286             : /**
    2287             :  * dm_hw_fini() - Teardown DC device
    2288             :  * @handle: The base driver device containing the amdgpu_dm device.
    2289             :  *
    2290             :  * Teardown components within &struct amdgpu_display_manager that require
    2291             :  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
    2292             :  * were loaded. Also flush IRQ workqueues and disable them.
    2293             :  */
    2294           0 : static int dm_hw_fini(void *handle)
    2295             : {
    2296           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    2297             : 
    2298           0 :         amdgpu_dm_hpd_fini(adev);
    2299             : 
    2300           0 :         amdgpu_dm_irq_fini(adev);
    2301           0 :         amdgpu_dm_fini(adev);
    2302           0 :         return 0;
    2303             : }
    2304             : 
    2305             : 
    2306           0 : static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
    2307             :                                  struct dc_state *state, bool enable)
    2308             : {
    2309             :         enum dc_irq_source irq_source;
    2310             :         struct amdgpu_crtc *acrtc;
    2311           0 :         int rc = -EBUSY;
    2312           0 :         int i = 0;
    2313             : 
    2314           0 :         for (i = 0; i < state->stream_count; i++) {
    2315           0 :                 acrtc = get_crtc_by_otg_inst(
    2316             :                                 adev, state->stream_status[i].primary_otg_inst);
    2317             : 
    2318           0 :                 if (acrtc && state->stream_status[i].plane_count != 0) {
    2319           0 :                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
    2320           0 :                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
     2321           0 :                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
    2322             :                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
    2323           0 :                         if (rc)
    2324           0 :                                 DRM_WARN("Failed to %s pflip interrupts\n",
    2325             :                                          enable ? "enable" : "disable");
    2326             : 
    2327           0 :                         if (enable) {
    2328           0 :                                 rc = dm_enable_vblank(&acrtc->base);
    2329           0 :                                 if (rc)
    2330           0 :                                         DRM_WARN("Failed to enable vblank interrupts\n");
    2331             :                         } else {
    2332           0 :                                 dm_disable_vblank(&acrtc->base);
    2333             :                         }
    2334             : 
    2335             :                 }
    2336             :         }
    2337             : 
    2338           0 : }
    2339             : 
    2340           0 : static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
    2341             : {
    2342           0 :         struct dc_state *context = NULL;
    2343           0 :         enum dc_status res = DC_ERROR_UNEXPECTED;
    2344             :         int i;
    2345             :         struct dc_stream_state *del_streams[MAX_PIPES];
    2346           0 :         int del_streams_count = 0;
    2347             : 
    2348           0 :         memset(del_streams, 0, sizeof(del_streams));
    2349             : 
    2350           0 :         context = dc_create_state(dc);
    2351           0 :         if (context == NULL)
    2352             :                 goto context_alloc_fail;
    2353             : 
    2354           0 :         dc_resource_state_copy_construct_current(dc, context);
    2355             : 
     2356             :         /* First remove all streams from the context */
    2357           0 :         for (i = 0; i < context->stream_count; i++) {
    2358           0 :                 struct dc_stream_state *stream = context->streams[i];
    2359             : 
    2360           0 :                 del_streams[del_streams_count++] = stream;
    2361             :         }
    2362             : 
    2363             :         /* Remove all planes for removed streams and then remove the streams */
    2364           0 :         for (i = 0; i < del_streams_count; i++) {
    2365           0 :                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
    2366             :                         res = DC_FAIL_DETACH_SURFACES;
    2367             :                         goto fail;
    2368             :                 }
    2369             : 
    2370           0 :                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
    2371           0 :                 if (res != DC_OK)
    2372             :                         goto fail;
    2373             :         }
    2374             : 
    2375           0 :         res = dc_commit_state(dc, context);
    2376             : 
    2377             : fail:
    2378           0 :         dc_release_state(context);
    2379             : 
    2380             : context_alloc_fail:
    2381           0 :         return res;
    2382             : }
    2383             : 
    2384           0 : static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
    2385             : {
    2386             :         int i;
    2387             : 
    2388           0 :         if (dm->hpd_rx_offload_wq) {
    2389           0 :                 for (i = 0; i < dm->dc->caps.max_links; i++)
    2390           0 :                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
    2391             :         }
    2392           0 : }
    2393             : 
    2394           0 : static int dm_suspend(void *handle)
    2395             : {
    2396           0 :         struct amdgpu_device *adev = handle;
    2397           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    2398           0 :         int ret = 0;
    2399             : 
    2400           0 :         if (amdgpu_in_reset(adev)) {
    2401           0 :                 mutex_lock(&dm->dc_lock);
    2402             : 
    2403           0 :                 dc_allow_idle_optimizations(adev->dm.dc, false);
    2404             : 
    2405           0 :                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
    2406             : 
    2407           0 :                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
    2408             : 
    2409           0 :                 amdgpu_dm_commit_zero_streams(dm->dc);
    2410             : 
    2411           0 :                 amdgpu_dm_irq_suspend(adev);
    2412             : 
    2413           0 :                 hpd_rx_irq_work_suspend(dm);
    2414             : 
    2415           0 :                 return ret;
    2416             :         }
    2417             : 
    2418           0 :         WARN_ON(adev->dm.cached_state);
    2419           0 :         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
    2420             : 
    2421           0 :         s3_handle_mst(adev_to_drm(adev), true);
    2422             : 
    2423           0 :         amdgpu_dm_irq_suspend(adev);
    2424             : 
    2425           0 :         hpd_rx_irq_work_suspend(dm);
    2426             : 
    2427           0 :         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
    2428             : 
    2429           0 :         return 0;
    2430             : }
    2431             : 
    2432             : struct amdgpu_dm_connector *
    2433           0 : amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
    2434             :                                              struct drm_crtc *crtc)
    2435             : {
    2436             :         uint32_t i;
    2437             :         struct drm_connector_state *new_con_state;
    2438             :         struct drm_connector *connector;
    2439             :         struct drm_crtc *crtc_from_state;
    2440             : 
    2441           0 :         for_each_new_connector_in_state(state, connector, new_con_state, i) {
    2442           0 :                 crtc_from_state = new_con_state->crtc;
    2443             : 
    2444           0 :                 if (crtc_from_state == crtc)
    2445             :                         return to_amdgpu_dm_connector(connector);
    2446             :         }
    2447             : 
    2448             :         return NULL;
    2449             : }
    2450             : 
    2451           0 : static void emulated_link_detect(struct dc_link *link)
    2452             : {
    2453           0 :         struct dc_sink_init_data sink_init_data = { 0 };
    2454           0 :         struct display_sink_capability sink_caps = { 0 };
    2455             :         enum dc_edid_status edid_status;
    2456           0 :         struct dc_context *dc_ctx = link->ctx;
    2457           0 :         struct dc_sink *sink = NULL;
    2458           0 :         struct dc_sink *prev_sink = NULL;
    2459             : 
    2460           0 :         link->type = dc_connection_none;
    2461           0 :         prev_sink = link->local_sink;
    2462             : 
    2463           0 :         if (prev_sink)
    2464           0 :                 dc_sink_release(prev_sink);
    2465             : 
    2466           0 :         switch (link->connector_signal) {
    2467             :         case SIGNAL_TYPE_HDMI_TYPE_A: {
    2468             :                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
    2469             :                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
    2470             :                 break;
    2471             :         }
    2472             : 
    2473             :         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
    2474           0 :                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
    2475           0 :                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
    2476           0 :                 break;
    2477             :         }
    2478             : 
    2479             :         case SIGNAL_TYPE_DVI_DUAL_LINK: {
    2480           0 :                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
    2481           0 :                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
    2482           0 :                 break;
    2483             :         }
    2484             : 
    2485             :         case SIGNAL_TYPE_LVDS: {
    2486           0 :                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
    2487           0 :                 sink_caps.signal = SIGNAL_TYPE_LVDS;
    2488           0 :                 break;
    2489             :         }
    2490             : 
    2491             :         case SIGNAL_TYPE_EDP: {
    2492           0 :                 sink_caps.transaction_type =
    2493             :                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
    2494           0 :                 sink_caps.signal = SIGNAL_TYPE_EDP;
    2495           0 :                 break;
    2496             :         }
    2497             : 
    2498             :         case SIGNAL_TYPE_DISPLAY_PORT: {
    2499           0 :                 sink_caps.transaction_type =
    2500             :                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
    2501           0 :                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
    2502           0 :                 break;
    2503             :         }
    2504             : 
    2505             :         default:
    2506           0 :                 DC_ERROR("Invalid connector type! signal:%d\n",
    2507             :                         link->connector_signal);
    2508           0 :                 return;
    2509             :         }
    2510             : 
    2511           0 :         sink_init_data.link = link;
    2512           0 :         sink_init_data.sink_signal = sink_caps.signal;
    2513             : 
    2514           0 :         sink = dc_sink_create(&sink_init_data);
    2515           0 :         if (!sink) {
    2516           0 :                 DC_ERROR("Failed to create sink!\n");
    2517           0 :                 return;
    2518             :         }
    2519             : 
    2520             :         /* dc_sink_create returns a new reference */
    2521           0 :         link->local_sink = sink;
    2522             : 
    2523           0 :         edid_status = dm_helpers_read_local_edid(
    2524             :                         link->ctx,
    2525             :                         link,
    2526             :                         sink);
    2527             : 
    2528           0 :         if (edid_status != EDID_OK)
    2529           0 :                 DC_ERROR("Failed to read EDID");
    2530             : 
    2531             : }
    2532             : 
    2533           0 : static void dm_gpureset_commit_state(struct dc_state *dc_state,
    2534             :                                      struct amdgpu_display_manager *dm)
    2535             : {
    2536             :         struct {
    2537             :                 struct dc_surface_update surface_updates[MAX_SURFACES];
    2538             :                 struct dc_plane_info plane_infos[MAX_SURFACES];
    2539             :                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
    2540             :                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
    2541             :                 struct dc_stream_update stream_update;
     2542             :         } *bundle;
    2543             :         int k, m;
    2544             : 
    2545           0 :         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
    2546             : 
    2547           0 :         if (!bundle) {
    2548           0 :                 dm_error("Failed to allocate update bundle\n");
    2549             :                 goto cleanup;
    2550             :         }
    2551             : 
    2552           0 :         for (k = 0; k < dc_state->stream_count; k++) {
    2553           0 :                 bundle->stream_update.stream = dc_state->streams[k];
    2554             : 
    2555           0 :                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
    2556           0 :                         bundle->surface_updates[m].surface =
    2557           0 :                                 dc_state->stream_status->plane_states[m];
    2558           0 :                         bundle->surface_updates[m].surface->force_full_update =
    2559             :                                 true;
    2560             :                 }
    2561           0 :                 dc_commit_updates_for_stream(
    2562           0 :                         dm->dc, bundle->surface_updates,
    2563             :                         dc_state->stream_status->plane_count,
    2564             :                         dc_state->streams[k], &bundle->stream_update, dc_state);
    2565             :         }
    2566             : 
    2567             : cleanup:
    2568           0 :         kfree(bundle);
    2569             : 
    2570           0 :         return;
    2571             : }
    2572             : 
    2573           0 : static int dm_resume(void *handle)
    2574             : {
    2575           0 :         struct amdgpu_device *adev = handle;
    2576           0 :         struct drm_device *ddev = adev_to_drm(adev);
    2577           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    2578             :         struct amdgpu_dm_connector *aconnector;
    2579             :         struct drm_connector *connector;
    2580             :         struct drm_connector_list_iter iter;
    2581             :         struct drm_crtc *crtc;
    2582             :         struct drm_crtc_state *new_crtc_state;
    2583             :         struct dm_crtc_state *dm_new_crtc_state;
    2584             :         struct drm_plane *plane;
    2585             :         struct drm_plane_state *new_plane_state;
    2586             :         struct dm_plane_state *dm_new_plane_state;
    2587           0 :         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
    2588           0 :         enum dc_connection_type new_connection_type = dc_connection_none;
    2589             :         struct dc_state *dc_state;
    2590             :         int i, r, j;
    2591             : 
    2592           0 :         if (amdgpu_in_reset(adev)) {
    2593           0 :                 dc_state = dm->cached_dc_state;
    2594             : 
    2595             :                 /*
    2596             :                  * The dc->current_state is backed up into dm->cached_dc_state
    2597             :                  * before we commit 0 streams.
    2598             :                  *
    2599             :                  * DC will clear link encoder assignments on the real state
    2600             :                  * but the changes won't propagate over to the copy we made
    2601             :                  * before the 0 streams commit.
    2602             :                  *
    2603             :                  * DC expects that link encoder assignments are *not* valid
    2604             :                  * when committing a state, so as a workaround we can copy
    2605             :                  * off of the current state.
    2606             :                  *
     2607             :                  * We lose the previous assignments, but we had already
     2608             :                  * committed 0 streams anyway.
    2609             :                  */
    2610           0 :                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
    2611             : 
    2612           0 :                 r = dm_dmub_hw_init(adev);
    2613           0 :                 if (r)
    2614           0 :                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
    2615             : 
    2616           0 :                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
    2617           0 :                 dc_resume(dm->dc);
    2618             : 
    2619           0 :                 amdgpu_dm_irq_resume_early(adev);
    2620             : 
    2621           0 :                 for (i = 0; i < dc_state->stream_count; i++) {
    2622           0 :                         dc_state->streams[i]->mode_changed = true;
    2623           0 :                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
    2624           0 :                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
    2625           0 :                                         = 0xffffffff;
    2626             :                         }
    2627             :                 }
    2628             : 
    2629           0 :                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
    2630           0 :                         amdgpu_dm_outbox_init(adev);
    2631           0 :                         dc_enable_dmub_outbox(adev->dm.dc);
    2632             :                 }
    2633             : 
    2634           0 :                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
    2635             : 
    2636           0 :                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
    2637             : 
    2638           0 :                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
    2639             : 
    2640           0 :                 dc_release_state(dm->cached_dc_state);
    2641           0 :                 dm->cached_dc_state = NULL;
    2642             : 
    2643           0 :                 amdgpu_dm_irq_resume_late(adev);
    2644             : 
    2645           0 :                 mutex_unlock(&dm->dc_lock);
    2646             : 
    2647           0 :                 return 0;
    2648             :         }
    2649             :         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
    2650           0 :         dc_release_state(dm_state->context);
    2651           0 :         dm_state->context = dc_create_state(dm->dc);
    2652             :         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
    2653           0 :         dc_resource_state_construct(dm->dc, dm_state->context);
    2654             : 
    2655             :         /* Before powering on DC we need to re-initialize DMUB. */
    2656           0 :         dm_dmub_hw_resume(adev);
    2657             : 
    2658             :         /* Re-enable outbox interrupts for DPIA. */
    2659           0 :         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
    2660           0 :                 amdgpu_dm_outbox_init(adev);
    2661           0 :                 dc_enable_dmub_outbox(adev->dm.dc);
    2662             :         }
    2663             : 
    2664             :         /* power on hardware */
    2665           0 :         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
    2666             : 
    2667             :         /* program HPD filter */
    2668           0 :         dc_resume(dm->dc);
    2669             : 
    2670             :         /*
     2671             :          * Early-enable the HPD Rx IRQ; this should be done before setting the
     2672             :          * mode, as short-pulse interrupts are used for MST.
    2673             :          */
    2674           0 :         amdgpu_dm_irq_resume_early(adev);
    2675             : 
     2676             :         /* On resume we need to rewrite the MSTM control bits to enable MST */
    2677           0 :         s3_handle_mst(ddev, false);
    2678             : 
     2679             :         /* Do detection */
    2680           0 :         drm_connector_list_iter_begin(ddev, &iter);
    2681           0 :         drm_for_each_connector_iter(connector, &iter) {
    2682           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    2683             : 
    2684             :                 /*
     2685             :                  * This is the case when traversing through already-created
     2686             :                  * MST connectors; they should be skipped.
    2687             :                  */
    2688           0 :                 if (aconnector->dc_link &&
    2689           0 :                     aconnector->dc_link->type == dc_connection_mst_branch)
    2690           0 :                         continue;
    2691             : 
    2692           0 :                 mutex_lock(&aconnector->hpd_lock);
    2693           0 :                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
    2694           0 :                         DRM_ERROR("KMS: Failed to detect connector\n");
    2695             : 
    2696           0 :                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
    2697           0 :                         emulated_link_detect(aconnector->dc_link);
    2698             :                 } else {
    2699           0 :                         mutex_lock(&dm->dc_lock);
    2700           0 :                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
    2701           0 :                         mutex_unlock(&dm->dc_lock);
    2702             :                 }
    2703             : 
    2704           0 :                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
    2705           0 :                         aconnector->fake_enable = false;
    2706             : 
    2707           0 :                 if (aconnector->dc_sink)
    2708           0 :                         dc_sink_release(aconnector->dc_sink);
    2709           0 :                 aconnector->dc_sink = NULL;
    2710           0 :                 amdgpu_dm_update_connector_after_detect(aconnector);
    2711           0 :                 mutex_unlock(&aconnector->hpd_lock);
    2712             :         }
    2713           0 :         drm_connector_list_iter_end(&iter);
    2714             : 
    2715             :         /* Force mode set in atomic commit */
    2716           0 :         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
    2717           0 :                 new_crtc_state->active_changed = true;
    2718             : 
    2719             :         /*
    2720             :          * atomic_check is expected to create the dc states. We need to release
    2721             :          * them here, since they were duplicated as part of the suspend
    2722             :          * procedure.
    2723             :          */
    2724           0 :         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
    2725           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    2726           0 :                 if (dm_new_crtc_state->stream) {
    2727           0 :                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
    2728           0 :                         dc_stream_release(dm_new_crtc_state->stream);
    2729           0 :                         dm_new_crtc_state->stream = NULL;
    2730             :                 }
    2731             :         }
    2732             : 
    2733           0 :         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
    2734           0 :                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
    2735           0 :                 if (dm_new_plane_state->dc_state) {
    2736           0 :                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
    2737           0 :                         dc_plane_state_release(dm_new_plane_state->dc_state);
    2738           0 :                         dm_new_plane_state->dc_state = NULL;
    2739             :                 }
    2740             :         }
    2741             : 
    2742           0 :         drm_atomic_helper_resume(ddev, dm->cached_state);
    2743             : 
    2744           0 :         dm->cached_state = NULL;
    2745             : 
    2746           0 :         amdgpu_dm_irq_resume_late(adev);
    2747             : 
    2748           0 :         amdgpu_dm_smu_write_watermarks_table(adev);
    2749             : 
    2750           0 :         return 0;
    2751             : }
    2752             : 
    2753             : /**
    2754             :  * DOC: DM Lifecycle
    2755             :  *
    2756             :  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
    2757             :  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
    2758             :  * the base driver's device list to be initialized and torn down accordingly.
    2759             :  *
    2760             :  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
    2761             :  */
    2762             : 
    2763             : static const struct amd_ip_funcs amdgpu_dm_funcs = {
    2764             :         .name = "dm",
    2765             :         .early_init = dm_early_init,
    2766             :         .late_init = dm_late_init,
    2767             :         .sw_init = dm_sw_init,
    2768             :         .sw_fini = dm_sw_fini,
    2769             :         .early_fini = amdgpu_dm_early_fini,
    2770             :         .hw_init = dm_hw_init,
    2771             :         .hw_fini = dm_hw_fini,
    2772             :         .suspend = dm_suspend,
    2773             :         .resume = dm_resume,
    2774             :         .is_idle = dm_is_idle,
    2775             :         .wait_for_idle = dm_wait_for_idle,
    2776             :         .check_soft_reset = dm_check_soft_reset,
    2777             :         .soft_reset = dm_soft_reset,
    2778             :         .set_clockgating_state = dm_set_clockgating_state,
    2779             :         .set_powergating_state = dm_set_powergating_state,
    2780             : };
    2781             : 
    2782             : const struct amdgpu_ip_block_version dm_ip_block =
    2783             : {
    2784             :         .type = AMD_IP_BLOCK_TYPE_DCE,
    2785             :         .major = 1,
    2786             :         .minor = 0,
    2787             :         .rev = 0,
    2788             :         .funcs = &amdgpu_dm_funcs,
    2789             : };
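
To connect this with the DOC: DM Lifecycle note above: the base driver consumes dm_ip_block by adding it to the device's IP list, after which the hooks in amdgpu_dm_funcs are invoked at the matching init/suspend/resume stages. A hedged sketch follows; the wrapper function is illustrative only, and the real call site lives in the base driver's IP discovery code:

/* Illustrative only: register the display IP block with the base driver. */
static int example_register_display_ip(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_add(adev, &dm_ip_block);
}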
    2790             : 
    2791             : 
    2792             : /**
    2793             :  * DOC: atomic
    2794             :  *
    2795             :  * *WIP*
    2796             :  */
    2797             : 
    2798             : static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
    2799             :         .fb_create = amdgpu_display_user_framebuffer_create,
    2800             :         .get_format_info = amd_get_format_info,
    2801             :         .output_poll_changed = drm_fb_helper_output_poll_changed,
    2802             :         .atomic_check = amdgpu_dm_atomic_check,
    2803             :         .atomic_commit = drm_atomic_helper_commit,
    2804             : };
    2805             : 
    2806             : static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
    2807             :         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
    2808             : };
    2809             : 
    2810           0 : static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
    2811             : {
    2812             :         u32 max_avg, min_cll, max, min, q, r;
    2813             :         struct amdgpu_dm_backlight_caps *caps;
    2814             :         struct amdgpu_display_manager *dm;
    2815             :         struct drm_connector *conn_base;
    2816             :         struct amdgpu_device *adev;
    2817           0 :         struct dc_link *link = NULL;
    2818             :         static const u8 pre_computed_values[] = {
    2819             :                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
    2820             :                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
    2821             :         int i;
    2822             : 
    2823           0 :         if (!aconnector || !aconnector->dc_link)
    2824             :                 return;
    2825             : 
    2826           0 :         link = aconnector->dc_link;
    2827           0 :         if (link->connector_signal != SIGNAL_TYPE_EDP)
    2828             :                 return;
    2829             : 
    2830           0 :         conn_base = &aconnector->base;
    2831           0 :         adev = drm_to_adev(conn_base->dev);
    2832           0 :         dm = &adev->dm;
    2833           0 :         for (i = 0; i < dm->num_of_edps; i++) {
    2834           0 :                 if (link == dm->backlight_link[i])
    2835             :                         break;
    2836             :         }
    2837           0 :         if (i >= dm->num_of_edps)
    2838             :                 return;
    2839           0 :         caps = &dm->backlight_caps[i];
    2840           0 :         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
    2841           0 :         caps->aux_support = false;
    2842           0 :         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
    2843           0 :         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
    2844             : 
    2845           0 :         if (caps->ext_caps->bits.oled == 1 /*||
    2846             :             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
    2847             :             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
    2848           0 :                 caps->aux_support = true;
    2849             : 
    2850           0 :         if (amdgpu_backlight == 0)
    2851           0 :                 caps->aux_support = false;
    2852           0 :         else if (amdgpu_backlight == 1)
    2853           0 :                 caps->aux_support = true;
    2854             : 
    2855             :         /* From the specification (CTA-861-G), the maximum luminance is
    2856             :          * calculated as:
    2857             :          *      Luminance = 50*2**(CV/32)
    2858             :          * where CV is a one-byte value.
    2859             :          * Evaluating this expression directly would need floating-point
    2860             :          * precision; to avoid that complexity, we take advantage of the fact
    2861             :          * that CV is divided by a constant. By Euclid's division algorithm,
    2862             :          * CV can be written as CV = 32*q + r. Substituting this into the
    2863             :          * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
    2864             :          * to pre-compute 50*2**(r/32) for r in 0..31. The values were
    2865             :          * pre-computed with the following Ruby line:
    2866             :          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
    2867             :          * The results of the above expression can be verified against
    2868             :          * pre_computed_values.
    2869             :          */
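                     :         /*
                     :          * Worked example (illustrative note, not part of the original
                     :          * source): for max_avg = 100 we get q = 100 >> 5 = 3 and
                     :          * r = 100 % 32 = 4, so max = (1 << 3) * pre_computed_values[4]
                     :          * = 8 * 55 = 440, which approximates 50*2**(100/32) ~= 436.
                     :          */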
    2870           0 :         q = max_avg >> 5;
    2871           0 :         r = max_avg % 32;
    2872           0 :         max = (1 << q) * pre_computed_values[r];
    2873             : 
    2874             :         // min luminance: maxLum * (CV/255)^2 / 100
    2875           0 :         q = DIV_ROUND_CLOSEST(min_cll, 255);
    2876           0 :         min = max * DIV_ROUND_CLOSEST((q * q), 100);
    2877             : 
    2878           0 :         caps->aux_max_input_signal = max;
    2879           0 :         caps->aux_min_input_signal = min;
    2880             : }
    2881             : 
    2882           0 : void amdgpu_dm_update_connector_after_detect(
    2883             :                 struct amdgpu_dm_connector *aconnector)
    2884             : {
    2885           0 :         struct drm_connector *connector = &aconnector->base;
    2886           0 :         struct drm_device *dev = connector->dev;
    2887             :         struct dc_sink *sink;
    2888             : 
    2889             :         /* MST handled by drm_mst framework */
    2890           0 :         if (aconnector->mst_mgr.mst_state == true)
    2891             :                 return;
    2892             : 
    2893           0 :         sink = aconnector->dc_link->local_sink;
    2894           0 :         if (sink)
    2895           0 :                 dc_sink_retain(sink);
    2896             : 
    2897             :         /*
    2898             :          * An EDID-managed connector gets its first update only in the mode_valid
    2899             :          * hook; the connector sink is then set to either a fake or a physical sink
    2900             :          * depending on link status. Skip if this was already done during boot.
    2901             :          */
    2902           0 :         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
    2903           0 :                         && aconnector->dc_em_sink) {
    2904             : 
    2905             :                 /*
    2906             :                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
    2907             :                  * fake a stream, because connector->sink is set to NULL on resume.
    2908             :                  */
    2909           0 :                 mutex_lock(&dev->mode_config.mutex);
    2910             : 
    2911           0 :                 if (sink) {
    2912           0 :                         if (aconnector->dc_sink) {
    2913           0 :                                 amdgpu_dm_update_freesync_caps(connector, NULL);
    2914             :                                 /*
    2915             :                                  * The retain and release below are used to bump up
    2916             :                                  * the refcount for the sink because the link no longer
    2917             :                                  * points to it after disconnect; otherwise, on the next
    2918             :                                  * crtc-to-connector reshuffle by the UMD we would get an
    2919             :                                  * unwanted dc_sink release.
    2920             :                                  */
    2920           0 :                                 dc_sink_release(aconnector->dc_sink);
    2921             :                         }
    2922           0 :                         aconnector->dc_sink = sink;
    2923           0 :                         dc_sink_retain(aconnector->dc_sink);
    2924           0 :                         amdgpu_dm_update_freesync_caps(connector,
    2925             :                                         aconnector->edid);
    2926             :                 } else {
    2927           0 :                         amdgpu_dm_update_freesync_caps(connector, NULL);
    2928           0 :                         if (!aconnector->dc_sink) {
    2929           0 :                                 aconnector->dc_sink = aconnector->dc_em_sink;
    2930           0 :                                 dc_sink_retain(aconnector->dc_sink);
    2931             :                         }
    2932             :                 }
    2933             : 
    2934           0 :                 mutex_unlock(&dev->mode_config.mutex);
    2935             : 
    2936           0 :                 if (sink)
    2937           0 :                         dc_sink_release(sink);
    2938             :                 return;
    2939             :         }
    2940             : 
    2941             :         /*
    2942             :          * TODO: temporary guard until a proper fix is found.
    2943             :          * If this sink is an MST sink, we should not do anything.
    2944             :          */
    2945           0 :         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
    2946           0 :                 dc_sink_release(sink);
    2947           0 :                 return;
    2948             :         }
    2949             : 
    2950           0 :         if (aconnector->dc_sink == sink) {
    2951             :                 /*
    2952             :                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
    2953             :                  * Do nothing!!
    2954             :                  */
    2955           0 :                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
    2956             :                                 aconnector->connector_id);
    2957           0 :                 if (sink)
    2958           0 :                         dc_sink_release(sink);
    2959             :                 return;
    2960             :         }
    2961             : 
    2962           0 :         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
    2963             :                 aconnector->connector_id, aconnector->dc_sink, sink);
    2964             : 
    2965           0 :         mutex_lock(&dev->mode_config.mutex);
    2966             : 
    2967             :         /*
    2968             :          * 1. Update status of the drm connector
    2969             :          * 2. Send an event and let userspace tell us what to do
    2970             :          */
    2971           0 :         if (sink) {
    2972             :                 /*
    2973             :                  * TODO: check if we still need the S3 mode update workaround.
    2974             :                  * If yes, put it here.
    2975             :                  */
    2976           0 :                 if (aconnector->dc_sink) {
    2977           0 :                         amdgpu_dm_update_freesync_caps(connector, NULL);
    2978           0 :                         dc_sink_release(aconnector->dc_sink);
    2979             :                 }
    2980             : 
    2981           0 :                 aconnector->dc_sink = sink;
    2982           0 :                 dc_sink_retain(aconnector->dc_sink);
    2983           0 :                 if (sink->dc_edid.length == 0) {
    2984           0 :                         aconnector->edid = NULL;
    2985             :                         if (aconnector->dc_link->aux_mode) {
    2986             :                                 drm_dp_cec_unset_edid(
    2987             :                                         &aconnector->dm_dp_aux.aux);
    2988             :                         }
    2989             :                 } else {
    2990           0 :                         aconnector->edid =
    2991           0 :                                 (struct edid *)sink->dc_edid.raw_edid;
    2992             : 
    2993             :                         if (aconnector->dc_link->aux_mode)
    2994             :                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
    2995             :                                                     aconnector->edid);
    2996             :                 }
    2997             : 
    2998           0 :                 drm_connector_update_edid_property(connector, aconnector->edid);
    2999           0 :                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
    3000           0 :                 update_connector_ext_caps(aconnector);
    3001             :         } else {
    3002           0 :                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
    3003           0 :                 amdgpu_dm_update_freesync_caps(connector, NULL);
    3004           0 :                 drm_connector_update_edid_property(connector, NULL);
    3005           0 :                 aconnector->num_modes = 0;
    3006           0 :                 dc_sink_release(aconnector->dc_sink);
    3007           0 :                 aconnector->dc_sink = NULL;
    3008           0 :                 aconnector->edid = NULL;
    3009             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    3010             :                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
    3011             :                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
    3012             :                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    3013             : #endif
    3014             :         }
    3015             : 
    3016           0 :         mutex_unlock(&dev->mode_config.mutex);
    3017             : 
    3018           0 :         update_subconnector_property(aconnector);
    3019             : 
    3020           0 :         if (sink)
    3021           0 :                 dc_sink_release(sink);
    3022             : }
    3023             : 
    3024           0 : static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
    3025             : {
    3026           0 :         struct drm_connector *connector = &aconnector->base;
    3027           0 :         struct drm_device *dev = connector->dev;
    3028           0 :         enum dc_connection_type new_connection_type = dc_connection_none;
    3029           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    3030             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    3031             :         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
    3032             : #endif
    3033           0 :         bool ret = false;
    3034             : 
    3035           0 :         if (adev->dm.disable_hpd_irq)
    3036           0 :                 return;
    3037             : 
    3038             :         /*
    3039             :          * In case of failure or MST, there is no need to update the connector status
    3040             :          * or notify the OS, since (in the MST case) MST does this in its own context.
    3041             :          */
    3042           0 :         mutex_lock(&aconnector->hpd_lock);
    3043             : 
    3044             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    3045             :         if (adev->dm.hdcp_workqueue) {
    3046             :                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
    3047             :                 dm_con_state->update_hdcp = true;
    3048             :         }
    3049             : #endif
    3050           0 :         if (aconnector->fake_enable)
    3051           0 :                 aconnector->fake_enable = false;
    3052             : 
    3053           0 :         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
    3054           0 :                 DRM_ERROR("KMS: Failed to detect connector\n");
    3055             : 
    3056           0 :         if (aconnector->base.force && new_connection_type == dc_connection_none) {
    3057           0 :                 emulated_link_detect(aconnector->dc_link);
    3058             : 
    3059           0 :                 drm_modeset_lock_all(dev);
    3060           0 :                 dm_restore_drm_connector_state(dev, connector);
    3061           0 :                 drm_modeset_unlock_all(dev);
    3062             : 
    3063           0 :                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
    3064           0 :                         drm_kms_helper_connector_hotplug_event(connector);
    3065             :         } else {
    3066           0 :                 mutex_lock(&adev->dm.dc_lock);
    3067           0 :                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
    3068           0 :                 mutex_unlock(&adev->dm.dc_lock);
    3069           0 :                 if (ret) {
    3070           0 :                         amdgpu_dm_update_connector_after_detect(aconnector);
    3071             : 
    3072           0 :                         drm_modeset_lock_all(dev);
    3073           0 :                         dm_restore_drm_connector_state(dev, connector);
    3074           0 :                         drm_modeset_unlock_all(dev);
    3075             : 
    3076           0 :                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
    3077           0 :                                 drm_kms_helper_connector_hotplug_event(connector);
    3078             :                 }
    3079             :         }
    3080           0 :         mutex_unlock(&aconnector->hpd_lock);
    3081             : 
    3082             : }
    3083             : 
    3084           0 : static void handle_hpd_irq(void *param)
    3085             : {
    3086           0 :         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
    3087             : 
    3088           0 :         handle_hpd_irq_helper(aconnector);
    3089             : 
    3090           0 : }
    3091             : 
    3092           0 : static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
    3093             : {
    3094           0 :         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
    3095             :         uint8_t dret;
    3096           0 :         bool new_irq_handled = false;
    3097             :         int dpcd_addr;
    3098             :         int dpcd_bytes_to_read;
    3099             : 
    3100           0 :         const int max_process_count = 30;
    3101           0 :         int process_count = 0;
    3102             : 
    3103           0 :         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
    3104             : 
    3105           0 :         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
    3106             :                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
    3107             :                 /* DPCD 0x200 - 0x201 for downstream IRQ */
    3108             :                 dpcd_addr = DP_SINK_COUNT;
    3109             :         } else {
    3110           0 :                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
    3111             :                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
    3112           0 :                 dpcd_addr = DP_SINK_COUNT_ESI;
    3113             :         }
    3114             : 
    3115           0 :         dret = drm_dp_dpcd_read(
    3116             :                 &aconnector->dm_dp_aux.aux,
    3117             :                 dpcd_addr,
    3118             :                 esi,
    3119             :                 dpcd_bytes_to_read);
    3120             : 
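                     :         /*
                     :          * Poll loop: while full-sized ESI reads succeed, hand the ESI bytes
                     :          * to drm_dp_mst_hpd_irq() (when MST is active); if an IRQ was handled,
                     :          * ACK it back to the sink via a DPCD write and re-read the ESI.
                     :          * The loop is bounded by max_process_count.
                     :          */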
    3121           0 :         while (dret == dpcd_bytes_to_read &&
    3122             :                 process_count < max_process_count) {
    3123             :                 uint8_t retry;
    3124           0 :                 dret = 0;
    3125             : 
    3126           0 :                 process_count++;
    3127             : 
    3128           0 :                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
    3129             :                 /* handle HPD short pulse irq */
    3130           0 :                 if (aconnector->mst_mgr.mst_state)
    3131           0 :                         drm_dp_mst_hpd_irq(
    3132             :                                 &aconnector->mst_mgr,
    3133             :                                 esi,
    3134             :                                 &new_irq_handled);
    3135             : 
    3136           0 :                 if (new_irq_handled) {
    3137             :                         /* ACK at DPCD to notify the downstream device */
    3138           0 :                         const int ack_dpcd_bytes_to_write =
    3139             :                                 dpcd_bytes_to_read - 1;
    3140             : 
    3141           0 :                         for (retry = 0; retry < 3; retry++) {
    3142             :                                 uint8_t wret;
    3143             : 
    3144           0 :                                 wret = drm_dp_dpcd_write(
    3145             :                                         &aconnector->dm_dp_aux.aux,
    3146           0 :                                         dpcd_addr + 1,
    3147             :                                         &esi[1],
    3148             :                                         ack_dpcd_bytes_to_write);
    3149           0 :                                 if (wret == ack_dpcd_bytes_to_write)
    3150             :                                         break;
    3151             :                         }
    3152             : 
    3153             :                         /* check if there is a new irq to be handled */
    3154           0 :                         dret = drm_dp_dpcd_read(
    3155             :                                 &aconnector->dm_dp_aux.aux,
    3156             :                                 dpcd_addr,
    3157             :                                 esi,
    3158             :                                 dpcd_bytes_to_read);
    3159             : 
    3160           0 :                         new_irq_handled = false;
    3161             :                 } else {
    3162             :                         break;
    3163             :                 }
    3164             :         }
    3165             : 
    3166           0 :         if (process_count == max_process_count)
    3167           0 :                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
    3168           0 : }
    3169             : 
    3170           0 : static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
    3171             :                                                         union hpd_irq_data hpd_irq_data)
    3172             : {
    3173           0 :         struct hpd_rx_irq_offload_work *offload_work =
    3174             :                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
    3175             : 
    3176           0 :         if (!offload_work) {
    3177           0 :                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
    3178           0 :                 return;
    3179             :         }
    3180             : 
    3181           0 :         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
    3182           0 :         offload_work->data = hpd_irq_data;
    3183           0 :         offload_work->offload_wq = offload_wq;
    3184             : 
    3185           0 :         queue_work(offload_wq->wq, &offload_work->work);
    3186           0 :         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
    3187             : }
    3188             : 
    3189           0 : static void handle_hpd_rx_irq(void *param)
    3190             : {
    3191           0 :         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
    3192           0 :         struct drm_connector *connector = &aconnector->base;
    3193           0 :         struct drm_device *dev = connector->dev;
    3194           0 :         struct dc_link *dc_link = aconnector->dc_link;
    3195           0 :         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
    3196           0 :         bool result = false;
    3197           0 :         enum dc_connection_type new_connection_type = dc_connection_none;
    3198           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    3199             :         union hpd_irq_data hpd_irq_data;
    3200           0 :         bool link_loss = false;
    3201           0 :         bool has_left_work = false;
    3202           0 :         int idx = aconnector->base.index;
    3203           0 :         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
    3204             : 
    3205           0 :         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
    3206             : 
    3207           0 :         if (adev->dm.disable_hpd_irq)
    3208           0 :                 return;
    3209             : 
    3210             :         /*
    3211             :          * TODO: Temporary mutex to protect the hpd interrupt from gpio
    3212             :          * conflicts; once the i2c helper is implemented, this mutex should
    3213             :          * be retired.
    3214             :          */
    3215           0 :         mutex_lock(&aconnector->hpd_lock);
    3216             : 
    3217           0 :         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
    3218             :                                                 &link_loss, true, &has_left_work);
    3219             : 
    3220           0 :         if (!has_left_work)
    3221             :                 goto out;
    3222             : 
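                     :         /*
                     :          * Dispatch on the decoded IRQ data: automated-test requests go straight
                     :          * to the offload workqueue; MST up-request/down-reply messages are
                     :          * handled here via dm_handle_mst_sideband_msg(); link loss is offloaded
                     :          * at most once at a time, guarded by is_handling_link_loss.
                     :          */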
    3223           0 :         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
    3224           0 :                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
    3225           0 :                 goto out;
    3226             :         }
    3227             : 
    3228           0 :         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
    3229           0 :                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
    3230             :                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
    3231           0 :                         dm_handle_mst_sideband_msg(aconnector);
    3232           0 :                         goto out;
    3233             :                 }
    3234             : 
    3235           0 :                 if (link_loss) {
    3236           0 :                         bool skip = false;
    3237             : 
    3238           0 :                         spin_lock(&offload_wq->offload_lock);
    3239           0 :                         skip = offload_wq->is_handling_link_loss;
    3240             : 
    3241           0 :                         if (!skip)
    3242           0 :                                 offload_wq->is_handling_link_loss = true;
    3243             : 
    3244           0 :                         spin_unlock(&offload_wq->offload_lock);
    3245             : 
    3246           0 :                         if (!skip)
    3247           0 :                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
    3248             : 
    3249             :                         goto out;
    3250             :                 }
    3251             :         }
    3252             : 
    3253             : out:
    3254           0 :         if (result && !is_mst_root_connector) {
    3255             :                 /* Downstream Port status changed. */
    3256           0 :                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
    3257           0 :                         DRM_ERROR("KMS: Failed to detect connector\n");
    3258             : 
    3259           0 :                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
    3260           0 :                         emulated_link_detect(dc_link);
    3261             : 
    3262           0 :                         if (aconnector->fake_enable)
    3263           0 :                                 aconnector->fake_enable = false;
    3264             : 
    3265           0 :                         amdgpu_dm_update_connector_after_detect(aconnector);
    3266             : 
    3267             : 
    3268           0 :                         drm_modeset_lock_all(dev);
    3269           0 :                         dm_restore_drm_connector_state(dev, connector);
    3270           0 :                         drm_modeset_unlock_all(dev);
    3271             : 
    3272           0 :                         drm_kms_helper_connector_hotplug_event(connector);
    3273             :                 } else {
    3274           0 :                         bool ret = false;
    3275             : 
    3276           0 :                         mutex_lock(&adev->dm.dc_lock);
    3277           0 :                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
    3278           0 :                         mutex_unlock(&adev->dm.dc_lock);
    3279             : 
    3280           0 :                         if (ret) {
    3281           0 :                                 if (aconnector->fake_enable)
    3282           0 :                                         aconnector->fake_enable = false;
    3283             : 
    3284           0 :                                 amdgpu_dm_update_connector_after_detect(aconnector);
    3285             : 
    3286           0 :                                 drm_modeset_lock_all(dev);
    3287           0 :                                 dm_restore_drm_connector_state(dev, connector);
    3288           0 :                                 drm_modeset_unlock_all(dev);
    3289             : 
    3290           0 :                                 drm_kms_helper_connector_hotplug_event(connector);
    3291             :                         }
    3292             :                 }
    3293             :         }
    3294             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    3295             :         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
    3296             :                 if (adev->dm.hdcp_workqueue)
    3297             :                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
    3298             :         }
    3299             : #endif
    3300             : 
    3301             :         if (dc_link->type != dc_connection_mst_branch)
    3302             :                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
    3303             : 
    3304           0 :         mutex_unlock(&aconnector->hpd_lock);
    3305             : }
    3306             : 
    3307           0 : static void register_hpd_handlers(struct amdgpu_device *adev)
    3308             : {
    3309           0 :         struct drm_device *dev = adev_to_drm(adev);
    3310             :         struct drm_connector *connector;
    3311             :         struct amdgpu_dm_connector *aconnector;
    3312             :         const struct dc_link *dc_link;
    3313           0 :         struct dc_interrupt_params int_params = {0};
    3314             : 
    3315             :         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
    3316             :         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
    3317             : 
    3318           0 :         list_for_each_entry(connector,
    3319             :                         &dev->mode_config.connector_list, head)  {
    3320             : 
    3321           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    3322           0 :                 dc_link = aconnector->dc_link;
    3323             : 
    3324           0 :                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
    3325           0 :                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
    3326           0 :                         int_params.irq_source = dc_link->irq_source_hpd;
    3327             : 
    3328           0 :                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3329             :                                         handle_hpd_irq,
    3330             :                                         (void *) aconnector);
    3331             :                 }
    3332             : 
    3333           0 :                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
    3334             : 
    3335             :                         /* Also register for DP short pulse (hpd_rx). */
    3336           0 :                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
    3337           0 :                         int_params.irq_source = dc_link->irq_source_hpd_rx;
    3338             : 
    3339           0 :                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3340             :                                         handle_hpd_rx_irq,
    3341             :                                         (void *) aconnector);
    3342             : 
    3343           0 :                         if (adev->dm.hpd_rx_offload_wq)
    3344           0 :                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
    3345             :                                         aconnector;
    3346             :                 }
    3347             :         }
    3348           0 : }
    3349             : 
    3350             : #if defined(CONFIG_DRM_AMD_DC_SI)
    3351             : /* Register IRQ sources and initialize IRQ callbacks */
    3352             : static int dce60_register_irq_handlers(struct amdgpu_device *adev)
    3353             : {
    3354             :         struct dc *dc = adev->dm.dc;
    3355             :         struct common_irq_params *c_irq_params;
    3356             :         struct dc_interrupt_params int_params = {0};
    3357             :         int r;
    3358             :         int i;
    3359             :         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
    3360             : 
    3361             :         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
    3362             :         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
    3363             : 
    3364             :         /*
    3365             :          * Actions of amdgpu_irq_add_id():
    3366             :          * 1. Register a set() function with base driver.
    3367             :          *    Base driver will call set() function to enable/disable an
    3368             :          *    interrupt in DC hardware.
    3369             :          * 2. Register amdgpu_dm_irq_handler().
    3370             :          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
    3371             :          *    coming from DC hardware.
    3372             :          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
    3373             :          *    for acknowledging and handling. */
    3374             : 
    3375             :         /* Use VBLANK interrupt */
    3376             :         for (i = 0; i < adev->mode_info.num_crtc; i++) {
    3377             :                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
    3378             :                 if (r) {
    3379             :                         DRM_ERROR("Failed to add crtc irq id!\n");
    3380             :                         return r;
    3381             :                 }
    3382             : 
    3383             :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3384             :                 int_params.irq_source =
    3385             :                         dc_interrupt_to_irq_source(dc, i + 1, 0);
    3386             : 
    3387             :                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
    3388             : 
    3389             :                 c_irq_params->adev = adev;
    3390             :                 c_irq_params->irq_src = int_params.irq_source;
    3391             : 
    3392             :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3393             :                                 dm_crtc_high_irq, c_irq_params);
    3394             :         }
    3395             : 
    3396             :         /* Use GRPH_PFLIP interrupt */
    3397             :         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
    3398             :                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
    3399             :                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
    3400             :                 if (r) {
    3401             :                         DRM_ERROR("Failed to add page flip irq id!\n");
    3402             :                         return r;
    3403             :                 }
    3404             : 
    3405             :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3406             :                 int_params.irq_source =
    3407             :                         dc_interrupt_to_irq_source(dc, i, 0);
    3408             : 
    3409             :                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
    3410             : 
    3411             :                 c_irq_params->adev = adev;
    3412             :                 c_irq_params->irq_src = int_params.irq_source;
    3413             : 
    3414             :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3415             :                                 dm_pflip_high_irq, c_irq_params);
    3416             : 
    3417             :         }
    3418             : 
    3419             :         /* HPD */
    3420             :         r = amdgpu_irq_add_id(adev, client_id,
    3421             :                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
    3422             :         if (r) {
    3423             :                 DRM_ERROR("Failed to add hpd irq id!\n");
    3424             :                 return r;
    3425             :         }
    3426             : 
    3427             :         register_hpd_handlers(adev);
    3428             : 
    3429             :         return 0;
    3430             : }
    3431             : #endif
    3432             : 
    3433             : /* Register IRQ sources and initialize IRQ callbacks */
    3434           0 : static int dce110_register_irq_handlers(struct amdgpu_device *adev)
    3435             : {
    3436           0 :         struct dc *dc = adev->dm.dc;
    3437             :         struct common_irq_params *c_irq_params;
    3438           0 :         struct dc_interrupt_params int_params = {0};
    3439             :         int r;
    3440             :         int i;
    3441           0 :         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
    3442             : 
    3443           0 :         if (adev->family >= AMDGPU_FAMILY_AI)
    3444           0 :                 client_id = SOC15_IH_CLIENTID_DCE;
    3445             : 
    3446             :         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
    3447             :         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
    3448             : 
    3449             :         /*
    3450             :          * Actions of amdgpu_irq_add_id():
    3451             :          * 1. Register a set() function with base driver.
    3452             :          *    Base driver will call set() function to enable/disable an
    3453             :          *    interrupt in DC hardware.
    3454             :          * 2. Register amdgpu_dm_irq_handler().
    3455             :          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
    3456             :          *    coming from DC hardware.
    3457             :          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
    3458             :          *    for acknowledging and handling. */
    3459             : 
    3460             :         /* Use VBLANK interrupt */
    3461           0 :         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
    3462           0 :                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
    3463           0 :                 if (r) {
    3464           0 :                         DRM_ERROR("Failed to add crtc irq id!\n");
    3465           0 :                         return r;
    3466             :                 }
    3467             : 
    3468           0 :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3469           0 :                 int_params.irq_source =
    3470           0 :                         dc_interrupt_to_irq_source(dc, i, 0);
    3471             : 
    3472           0 :                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
    3473             : 
    3474           0 :                 c_irq_params->adev = adev;
    3475           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3476             : 
    3477           0 :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3478             :                                 dm_crtc_high_irq, c_irq_params);
    3479             :         }
    3480             : 
    3481             :         /* Use VUPDATE interrupt */
    3482           0 :         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
    3483           0 :                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
    3484           0 :                 if (r) {
    3485           0 :                         DRM_ERROR("Failed to add vupdate irq id!\n");
    3486           0 :                         return r;
    3487             :                 }
    3488             : 
    3489           0 :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3490           0 :                 int_params.irq_source =
    3491           0 :                         dc_interrupt_to_irq_source(dc, i, 0);
    3492             : 
    3493           0 :                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
    3494             : 
    3495           0 :                 c_irq_params->adev = adev;
    3496           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3497             : 
    3498           0 :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3499             :                                 dm_vupdate_high_irq, c_irq_params);
    3500             :         }
    3501             : 
    3502             :         /* Use GRPH_PFLIP interrupt */
    3503           0 :         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
    3504           0 :                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
    3505           0 :                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
    3506           0 :                 if (r) {
    3507           0 :                         DRM_ERROR("Failed to add page flip irq id!\n");
    3508           0 :                         return r;
    3509             :                 }
    3510             : 
    3511           0 :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3512           0 :                 int_params.irq_source =
    3513           0 :                         dc_interrupt_to_irq_source(dc, i, 0);
    3514             : 
    3515           0 :                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
    3516             : 
    3517           0 :                 c_irq_params->adev = adev;
    3518           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3519             : 
    3520           0 :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3521             :                                 dm_pflip_high_irq, c_irq_params);
    3522             : 
    3523             :         }
    3524             : 
    3525             :         /* HPD */
    3526           0 :         r = amdgpu_irq_add_id(adev, client_id,
    3527             :                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
    3528           0 :         if (r) {
    3529           0 :                 DRM_ERROR("Failed to add hpd irq id!\n");
    3530           0 :                 return r;
    3531             :         }
    3532             : 
    3533           0 :         register_hpd_handlers(adev);
    3534             : 
    3535           0 :         return 0;
    3536             : }
    3537             : 
    3538             : /* Register IRQ sources and initialize IRQ callbacks */
    3539           0 : static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
    3540             : {
    3541           0 :         struct dc *dc = adev->dm.dc;
    3542             :         struct common_irq_params *c_irq_params;
    3543           0 :         struct dc_interrupt_params int_params = {0};
    3544             :         int r;
    3545             :         int i;
    3546             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    3547             :         static const unsigned int vrtl_int_srcid[] = {
    3548             :                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
    3549             :                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
    3550             :                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
    3551             :                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
    3552             :                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
    3553             :                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
    3554             :         };
    3555             : #endif
    3556             : 
    3557             :         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
    3558             :         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
    3559             : 
    3560             :         /*
    3561             :          * Actions of amdgpu_irq_add_id():
    3562             :          * 1. Register a set() function with base driver.
    3563             :          *    Base driver will call set() function to enable/disable an
    3564             :          *    interrupt in DC hardware.
    3565             :          * 2. Register amdgpu_dm_irq_handler().
    3566             :          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
    3567             :          *    coming from DC hardware.
    3568             :          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
    3569             :          *    for acknowledging and handling.
    3570             :          */
    3571             : 
    3572             :         /* Use VSTARTUP interrupt */
    3573           0 :         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
    3574           0 :                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
    3575           0 :                         i++) {
    3576           0 :                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
    3577             : 
    3578           0 :                 if (r) {
    3579           0 :                         DRM_ERROR("Failed to add crtc irq id!\n");
    3580           0 :                         return r;
    3581             :                 }
    3582             : 
    3583           0 :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3584           0 :                 int_params.irq_source =
    3585           0 :                         dc_interrupt_to_irq_source(dc, i, 0);
    3586             : 
    3587           0 :                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
    3588             : 
    3589           0 :                 c_irq_params->adev = adev;
    3590           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3591             : 
    3592           0 :                 amdgpu_dm_irq_register_interrupt(
    3593             :                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
    3594             :         }
    3595             : 
    3596             :         /* Use otg vertical line interrupt */
    3597             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    3598             :         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
    3599             :                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
    3600             :                                 vrtl_int_srcid[i], &adev->vline0_irq);
    3601             : 
    3602             :                 if (r) {
    3603             :                         DRM_ERROR("Failed to add vline0 irq id!\n");
    3604             :                         return r;
    3605             :                 }
    3606             : 
    3607             :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3608             :                 int_params.irq_source =
    3609             :                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
    3610             : 
    3611             :                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
    3612             :                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
    3613             :                         break;
    3614             :                 }
    3615             : 
    3616             :                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
    3617             :                                         - DC_IRQ_SOURCE_DC1_VLINE0];
    3618             : 
    3619             :                 c_irq_params->adev = adev;
    3620             :                 c_irq_params->irq_src = int_params.irq_source;
    3621             : 
    3622             :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3623             :                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
    3624             :         }
    3625             : #endif
    3626             : 
    3627             :         /* Use the VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond
    3628             :          * to the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
    3629             :          * to trigger at the end of each vblank, regardless of the state of the
    3630             :          * lock, matching DCE behaviour.
    3631             :          */
    3632           0 :         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
    3633           0 :              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
    3634           0 :              i++) {
    3635           0 :                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
    3636             : 
    3637           0 :                 if (r) {
    3638           0 :                         DRM_ERROR("Failed to add vupdate irq id!\n");
    3639           0 :                         return r;
    3640             :                 }
    3641             : 
    3642           0 :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3643           0 :                 int_params.irq_source =
    3644           0 :                         dc_interrupt_to_irq_source(dc, i, 0);
    3645             : 
    3646           0 :                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
    3647             : 
    3648           0 :                 c_irq_params->adev = adev;
    3649           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3650             : 
    3651           0 :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3652             :                                 dm_vupdate_high_irq, c_irq_params);
    3653             :         }
    3654             : 
    3655             :         /* Use GRPH_PFLIP interrupt */
    3656           0 :         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
    3657           0 :                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
    3658           0 :                         i++) {
    3659           0 :                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
    3660           0 :                 if (r) {
    3661           0 :                         DRM_ERROR("Failed to add page flip irq id!\n");
    3662           0 :                         return r;
    3663             :                 }
    3664             : 
    3665           0 :                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
    3666           0 :                 int_params.irq_source =
    3667           0 :                         dc_interrupt_to_irq_source(dc, i, 0);
    3668             : 
    3669           0 :                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
    3670             : 
    3671           0 :                 c_irq_params->adev = adev;
    3672           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3673             : 
    3674           0 :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3675             :                                 dm_pflip_high_irq, c_irq_params);
    3676             : 
    3677             :         }
    3678             : 
    3679             :         /* HPD */
    3680           0 :         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
    3681             :                         &adev->hpd_irq);
    3682           0 :         if (r) {
    3683           0 :                 DRM_ERROR("Failed to add hpd irq id!\n");
    3684           0 :                 return r;
    3685             :         }
    3686             : 
    3687           0 :         register_hpd_handlers(adev);
    3688             : 
    3689           0 :         return 0;
    3690             : }
    3691             : /* Register Outbox IRQ sources and initialize IRQ callbacks */
    3692           0 : static int register_outbox_irq_handlers(struct amdgpu_device *adev)
    3693             : {
    3694           0 :         struct dc *dc = adev->dm.dc;
    3695             :         struct common_irq_params *c_irq_params;
    3696           0 :         struct dc_interrupt_params int_params = {0};
    3697             :         int r, i;
    3698             : 
    3699             :         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
    3700             :         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
    3701             : 
    3702           0 :         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
    3703             :                         &adev->dmub_outbox_irq);
    3704           0 :         if (r) {
    3705           0 :                 DRM_ERROR("Failed to add outbox irq id!\n");
    3706           0 :                 return r;
    3707             :         }
    3708             : 
    3709           0 :         if (dc->ctx->dmub_srv) {
    3710           0 :                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
    3711           0 :                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
    3712           0 :                 int_params.irq_source =
    3713           0 :                 dc_interrupt_to_irq_source(dc, i, 0);
    3714             : 
    3715           0 :                 c_irq_params = &adev->dm.dmub_outbox_params[0];
    3716             : 
    3717           0 :                 c_irq_params->adev = adev;
    3718           0 :                 c_irq_params->irq_src = int_params.irq_source;
    3719             : 
    3720           0 :                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
    3721             :                                 dm_dmub_outbox1_low_irq, c_irq_params);
    3722             :         }
    3723             : 
    3724             :         return 0;
    3725             : }
    3726             : 
    3727             : /*
    3728             :  * Acquires the lock for the atomic state object and returns
    3729             :  * the new atomic state.
    3730             :  *
    3731             :  * This should only be called during atomic check.
    3732             :  */
    3733           0 : int dm_atomic_get_state(struct drm_atomic_state *state,
    3734             :                         struct dm_atomic_state **dm_state)
    3735             : {
    3736           0 :         struct drm_device *dev = state->dev;
    3737           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    3738           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    3739             :         struct drm_private_state *priv_state;
    3740             : 
    3741           0 :         if (*dm_state)
    3742             :                 return 0;
    3743             : 
    3744           0 :         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
    3745           0 :         if (IS_ERR(priv_state))
    3746           0 :                 return PTR_ERR(priv_state);
    3747             : 
    3748           0 :         *dm_state = to_dm_atomic_state(priv_state);
    3749             : 
    3750           0 :         return 0;
    3751             : }
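                     : 
                     : /*
                     :  * Illustrative usage sketch (editor's addition, not part of the driver):
                     :  * a caller running during atomic check fetches the DM private state
                     :  * lazily through dm_atomic_get_state(). The helper name below is
                     :  * hypothetical and the block is compiled out.
                     :  */
                     : #if 0
                     : static int example_get_dm_state(struct drm_atomic_state *state)
                     : {
                     :         struct dm_atomic_state *dm_state = NULL;
                     :         int ret;
                     : 
                     :         /* Takes the private obj lock the first time; later calls with a
                     :          * non-NULL *dm_state return immediately.
                     :          */
                     :         ret = dm_atomic_get_state(state, &dm_state);
                     :         if (ret)
                     :                 return ret;
                     : 
                     :         /* dm_state->context can now be inspected or modified safely. */
                     :         return 0;
                     : }
                     : #endif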
    3752             : 
    3753             : static struct dm_atomic_state *
    3754             : dm_atomic_get_new_state(struct drm_atomic_state *state)
    3755             : {
    3756           0 :         struct drm_device *dev = state->dev;
    3757           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    3758           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    3759             :         struct drm_private_obj *obj;
    3760             :         struct drm_private_state *new_obj_state;
    3761             :         int i;
    3762             : 
    3763           0 :         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
    3764           0 :                 if (obj->funcs == dm->atomic_obj.funcs)
    3765             :                         return to_dm_atomic_state(new_obj_state);
    3766             :         }
    3767             : 
    3768             :         return NULL;
    3769             : }
    3770             : 
    3771             : static struct drm_private_state *
    3772           0 : dm_atomic_duplicate_state(struct drm_private_obj *obj)
    3773             : {
    3774             :         struct dm_atomic_state *old_state, *new_state;
    3775             : 
    3776           0 :         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
    3777           0 :         if (!new_state)
    3778             :                 return NULL;
    3779             : 
    3780           0 :         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
    3781             : 
    3782           0 :         old_state = to_dm_atomic_state(obj->state);
    3783             : 
    3784           0 :         if (old_state && old_state->context)
    3785           0 :                 new_state->context = dc_copy_state(old_state->context);
    3786             : 
    3787           0 :         if (!new_state->context) {
    3788           0 :                 kfree(new_state);
    3789           0 :                 return NULL;
    3790             :         }
    3791             : 
    3792             :         return &new_state->base;
    3793             : }
    3794             : 
    3795           0 : static void dm_atomic_destroy_state(struct drm_private_obj *obj,
    3796             :                                     struct drm_private_state *state)
    3797             : {
    3798           0 :         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
    3799             : 
    3800           0 :         if (dm_state && dm_state->context)
    3801           0 :                 dc_release_state(dm_state->context);
    3802             : 
    3803           0 :         kfree(dm_state);
    3804           0 : }
    3805             : 
    3806             : static struct drm_private_state_funcs dm_atomic_state_funcs = {
    3807             :         .atomic_duplicate_state = dm_atomic_duplicate_state,
    3808             :         .atomic_destroy_state = dm_atomic_destroy_state,
    3809             : };
    3810             : 
    3811           0 : static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
    3812             : {
    3813             :         struct dm_atomic_state *state;
    3814             :         int r;
    3815             : 
    3816           0 :         adev->mode_info.mode_config_initialized = true;
    3817             : 
    3818           0 :         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
    3819           0 :         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
    3820             : 
    3821           0 :         adev_to_drm(adev)->mode_config.max_width = 16384;
    3822           0 :         adev_to_drm(adev)->mode_config.max_height = 16384;
    3823             : 
    3824           0 :         adev_to_drm(adev)->mode_config.preferred_depth = 24;
    3825           0 :         if (adev->asic_type == CHIP_HAWAII)
    3826             :                 /* disable prefer shadow for now due to hibernation issues */
    3827           0 :                 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    3828             :         else
    3829           0 :                 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
    3830             :         /* indicates support for immediate flip */
    3831           0 :         adev_to_drm(adev)->mode_config.async_page_flip = true;
    3832             : 
    3833           0 :         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
    3834             : 
    3835           0 :         state = kzalloc(sizeof(*state), GFP_KERNEL);
    3836           0 :         if (!state)
    3837             :                 return -ENOMEM;
    3838             : 
    3839           0 :         state->context = dc_create_state(adev->dm.dc);
    3840           0 :         if (!state->context) {
    3841           0 :                 kfree(state);
    3842           0 :                 return -ENOMEM;
    3843             :         }
    3844             : 
    3845           0 :         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
    3846             : 
    3847           0 :         drm_atomic_private_obj_init(adev_to_drm(adev),
    3848             :                                     &adev->dm.atomic_obj,
    3849             :                                     &state->base,
    3850             :                                     &dm_atomic_state_funcs);
    3851             : 
    3852           0 :         r = amdgpu_display_modeset_create_props(adev);
    3853           0 :         if (r) {
    3854           0 :                 dc_release_state(state->context);
    3855           0 :                 kfree(state);
    3856           0 :                 return r;
    3857             :         }
    3858             : 
    3859           0 :         r = amdgpu_dm_audio_init(adev);
    3860           0 :         if (r) {
    3861           0 :                 dc_release_state(state->context);
    3862           0 :                 kfree(state);
    3863           0 :                 return r;
    3864             :         }
    3865             : 
    3866             :         return 0;
    3867             : }
    3868             : 
    3869             : #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
    3870             : #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
    3871             : #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
    3872             : 
    3873             : static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
    3874             :                                             int bl_idx)
    3875             : {
    3876             : #if defined(CONFIG_ACPI)
    3877             :         struct amdgpu_dm_backlight_caps caps;
    3878             : 
    3879             :         memset(&caps, 0, sizeof(caps));
    3880             : 
    3881             :         if (dm->backlight_caps[bl_idx].caps_valid)
    3882             :                 return;
    3883             : 
    3884             :         amdgpu_acpi_get_backlight_caps(&caps);
    3885             :         if (caps.caps_valid) {
    3886             :                 dm->backlight_caps[bl_idx].caps_valid = true;
    3887             :                 if (caps.aux_support)
    3888             :                         return;
    3889             :                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
    3890             :                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
    3891             :         } else {
    3892             :                 dm->backlight_caps[bl_idx].min_input_signal =
    3893             :                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
    3894             :                 dm->backlight_caps[bl_idx].max_input_signal =
    3895             :                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
    3896             :         }
    3897             : #else
    3898           0 :         if (dm->backlight_caps[bl_idx].aux_support)
    3899             :                 return;
    3900             : 
    3901           0 :         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
    3902           0 :         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
    3903             : #endif
    3904             : }
    3905             : 
    3906             : static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
    3907             :                                 unsigned *min, unsigned *max)
    3908             : {
    3909           0 :         if (!caps)
    3910             :                 return 0;
    3911             : 
    3912           0 :         if (caps->aux_support) {
    3913             :                 // Firmware limits are in nits, DC API wants millinits.
    3914           0 :                 *max = 1000 * caps->aux_max_input_signal;
    3915           0 :                 *min = 1000 * caps->aux_min_input_signal;
    3916             :         } else {
    3917             :                 // Firmware limits are 8-bit, PWM control is 16-bit.
    3918           0 :                 *max = 0x101 * caps->max_input_signal;
    3919           0 :                 *min = 0x101 * caps->min_input_signal;
    3920             :         }
    3921             :         return 1;
    3922             : }
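                      : /*
                      :  * Editor's note (illustrative, not part of the original source): with the
                      :  * default PWM caps above, min_input_signal = 12 and max_input_signal = 255,
                      :  * the 8-bit firmware limits expand to the 16-bit PWM range as
                      :  *
                      :  *      *min = 0x101 * 12  = 3084
                      :  *      *max = 0x101 * 255 = 0xffff
                      :  *
                      :  * On the AUX path, a panel reporting e.g. 500 nits maximum would yield
                      :  * *max = 1000 * 500 = 500000 millinits, as the DC API expects.
                      :  */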
    3923             : 
    3924             : static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
    3925             :                                         uint32_t brightness)
    3926             : {
    3927             :         unsigned min, max;
    3928             : 
    3929           0 :         if (!get_brightness_range(caps, &min, &max))
    3930             :                 return brightness;
    3931             : 
    3932             :         // Rescale 0..255 to min..max
    3933           0 :         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
    3934             :                                        AMDGPU_MAX_BL_LEVEL);
    3935             : }
    3936             : 
    3937           0 : static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
    3938             :                                       uint32_t brightness)
    3939             : {
    3940             :         unsigned min, max;
    3941             : 
    3942           0 :         if (!get_brightness_range(caps, &min, &max))
    3943             :                 return brightness;
    3944             : 
    3945           0 :         if (brightness < min)
    3946             :                 return 0;
    3947             :         // Rescale min..max to 0..255
    3948           0 :         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
    3949             :                                  max - min);
    3950             : }
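                      : /*
                      :  * Editor's note (illustrative, not part of the original source): the two
                      :  * conversion helpers above are approximate inverses. Continuing the PWM
                      :  * example with min = 3084 and max = 65535, a user brightness of
                      :  * AMDGPU_MAX_BL_LEVEL (255) maps to 65535 and back to 255, while 0 maps
                      :  * to 3084 and back to 0.
                      :  */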
    3951             : 
    3952           0 : static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
    3953             :                                          int bl_idx,
    3954             :                                          u32 user_brightness)
    3955             : {
    3956             :         struct amdgpu_dm_backlight_caps caps;
    3957             :         struct dc_link *link;
    3958             :         u32 brightness;
    3959             :         bool rc;
    3960             : 
    3961           0 :         amdgpu_dm_update_backlight_caps(dm, bl_idx);
    3962           0 :         caps = dm->backlight_caps[bl_idx];
    3963             : 
    3964           0 :         dm->brightness[bl_idx] = user_brightness;
    3965             :         /* update scratch register */
    3966           0 :         if (bl_idx == 0)
    3967           0 :                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
    3968           0 :         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
    3969           0 :         link = (struct dc_link *)dm->backlight_link[bl_idx];
    3970             : 
    3971             :         /* Change brightness based on AUX property */
    3972           0 :         if (caps.aux_support) {
    3973           0 :                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
    3974             :                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
    3975           0 :                 if (!rc)
    3976           0 :                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
    3977             :         } else {
    3978           0 :                 rc = dc_link_set_backlight_level(link, brightness, 0);
    3979           0 :                 if (!rc)
    3980           0 :                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
    3981             :         }
    3982             : 
    3983           0 :         if (rc)
    3984           0 :                 dm->actual_brightness[bl_idx] = user_brightness;
    3985           0 : }
    3986             : 
    3987           0 : static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
    3988             : {
    3989           0 :         struct amdgpu_display_manager *dm = bl_get_data(bd);
    3990             :         int i;
    3991             : 
    3992           0 :         for (i = 0; i < dm->num_of_edps; i++) {
    3993           0 :                 if (bd == dm->backlight_dev[i])
    3994             :                         break;
    3995             :         }
    3996           0 :         if (i >= AMDGPU_DM_MAX_NUM_EDP)
    3997           0 :                 i = 0;
    3998           0 :         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
    3999             : 
    4000           0 :         return 0;
    4001             : }
    4002             : 
    4003           0 : static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
    4004             :                                          int bl_idx)
    4005             : {
    4006             :         struct amdgpu_dm_backlight_caps caps;
    4007           0 :         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
    4008             : 
    4009           0 :         amdgpu_dm_update_backlight_caps(dm, bl_idx);
    4010           0 :         caps = dm->backlight_caps[bl_idx];
    4011             : 
    4012           0 :         if (caps.aux_support) {
    4013             :                 u32 avg, peak;
    4014             :                 bool rc;
    4015             : 
    4016           0 :                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
    4017           0 :                 if (!rc)
    4018           0 :                         return dm->brightness[bl_idx];
    4019           0 :                 return convert_brightness_to_user(&caps, avg);
    4020             :         } else {
    4021           0 :                 int ret = dc_link_get_backlight_level(link);
    4022             : 
    4023           0 :                 if (ret == DC_ERROR_UNEXPECTED)
    4024           0 :                         return dm->brightness[bl_idx];
    4025           0 :                 return convert_brightness_to_user(&caps, ret);
    4026             :         }
    4027             : }
    4028             : 
    4029           0 : static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
    4030             : {
    4031           0 :         struct amdgpu_display_manager *dm = bl_get_data(bd);
    4032             :         int i;
    4033             : 
    4034           0 :         for (i = 0; i < dm->num_of_edps; i++) {
    4035           0 :                 if (bd == dm->backlight_dev[i])
    4036             :                         break;
    4037             :         }
    4038           0 :         if (i >= AMDGPU_DM_MAX_NUM_EDP)
    4039           0 :                 i = 0;
    4040           0 :         return amdgpu_dm_backlight_get_level(dm, i);
    4041             : }
    4042             : 
    4043             : static const struct backlight_ops amdgpu_dm_backlight_ops = {
    4044             :         .options = BL_CORE_SUSPENDRESUME,
    4045             :         .get_brightness = amdgpu_dm_backlight_get_brightness,
    4046             :         .update_status  = amdgpu_dm_backlight_update_status,
    4047             : };
    4048             : 
    4049             : static void
    4050           0 : amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
    4051             : {
    4052             :         char bl_name[16];
    4053           0 :         struct backlight_properties props = { 0 };
    4054             : 
    4055           0 :         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
    4056           0 :         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
    4057             : 
    4058           0 :         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
    4059           0 :         props.brightness = AMDGPU_MAX_BL_LEVEL;
    4060           0 :         props.type = BACKLIGHT_RAW;
    4061             : 
    4062           0 :         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
    4063           0 :                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
    4064             : 
    4065           0 :         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
    4066           0 :                                                                        adev_to_drm(dm->adev)->dev,
    4067             :                                                                        dm,
    4068             :                                                                        &amdgpu_dm_backlight_ops,
    4069             :                                                                        &props);
    4070             : 
    4071           0 :         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
    4072           0 :                 DRM_ERROR("DM: Backlight registration failed!\n");
    4073             :         else
    4074           0 :                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
    4075           0 : }
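                      : /*
                      :  * Editor's note (illustrative, not part of the original source): with a
                      :  * primary DRM minor index of 0 and no eDP registered yet, the snprintf
                      :  * above produces "amdgpu_bl0"; a second eDP panel would then register
                      :  * as "amdgpu_bl1".
                      :  */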
    4076             : 
    4077           0 : static int initialize_plane(struct amdgpu_display_manager *dm,
    4078             :                             struct amdgpu_mode_info *mode_info, int plane_id,
    4079             :                             enum drm_plane_type plane_type,
    4080             :                             const struct dc_plane_cap *plane_cap)
    4081             : {
    4082             :         struct drm_plane *plane;
    4083             :         unsigned long possible_crtcs;
    4084           0 :         int ret = 0;
    4085             : 
    4086           0 :         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
    4087           0 :         if (!plane) {
    4088           0 :                 DRM_ERROR("KMS: Failed to allocate plane\n");
    4089           0 :                 return -ENOMEM;
    4090             :         }
    4091           0 :         plane->type = plane_type;
    4092             : 
    4093             :         /*
    4094             :          * HACK: IGT tests expect that the primary plane for a CRTC
     4095             :          * can only have one possible CRTC. Only expose support for
     4096             :          * any CRTC on planes that are not going to be used as a
     4097             :          * primary plane for a CRTC - like overlay or underlay planes.
    4098             :          */
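                      :         /*
                      :          * Editor's note (illustrative, not part of the original source):
                      :          * e.g. the primary plane with plane_id == 1 gets possible_crtcs ==
                      :          * 0x2 (CRTC 1 only), while an overlay plane with plane_id >=
                      :          * max_streams may be placed on any CRTC (0xff).
                      :          */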
    4099           0 :         possible_crtcs = 1 << plane_id;
    4100           0 :         if (plane_id >= dm->dc->caps.max_streams)
    4101           0 :                 possible_crtcs = 0xff;
    4102             : 
    4103           0 :         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
    4104             : 
    4105           0 :         if (ret) {
    4106           0 :                 DRM_ERROR("KMS: Failed to initialize plane\n");
    4107           0 :                 kfree(plane);
    4108           0 :                 return ret;
    4109             :         }
    4110             : 
    4111           0 :         if (mode_info)
    4112           0 :                 mode_info->planes[plane_id] = plane;
    4113             : 
    4114             :         return ret;
    4115             : }
    4116             : 
    4117             : 
    4118           0 : static void register_backlight_device(struct amdgpu_display_manager *dm,
    4119             :                                       struct dc_link *link)
    4120             : {
    4121           0 :         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
    4122           0 :             link->type != dc_connection_none) {
    4123             :                 /*
     4124             :                  * Even if registration fails, we should continue with
     4125             :                  * DM initialization because not having a backlight control
     4126             :                  * is better than a black screen.
    4127             :                  */
    4128           0 :                 if (!dm->backlight_dev[dm->num_of_edps])
    4129           0 :                         amdgpu_dm_register_backlight_device(dm);
    4130             : 
    4131           0 :                 if (dm->backlight_dev[dm->num_of_edps]) {
    4132           0 :                         dm->backlight_link[dm->num_of_edps] = link;
    4133           0 :                         dm->num_of_edps++;
    4134             :                 }
    4135             :         }
    4136           0 : }
    4137             : 
    4138             : static void amdgpu_set_panel_orientation(struct drm_connector *connector);
    4139             : 
    4140             : /*
    4141             :  * In this architecture, the association
    4142             :  * connector -> encoder -> crtc
     4143             :  * is not really required. The crtc and connector will hold the
     4144             :  * display_index as an abstraction to use with the DAL component.
    4145             :  *
    4146             :  * Returns 0 on success
    4147             :  */
    4148           0 : static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
    4149             : {
    4150           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    4151             :         int32_t i;
    4152           0 :         struct amdgpu_dm_connector *aconnector = NULL;
    4153           0 :         struct amdgpu_encoder *aencoder = NULL;
    4154           0 :         struct amdgpu_mode_info *mode_info = &adev->mode_info;
    4155             :         uint32_t link_cnt;
    4156             :         int32_t primary_planes;
    4157           0 :         enum dc_connection_type new_connection_type = dc_connection_none;
    4158             :         const struct dc_plane_cap *plane;
    4159           0 :         bool psr_feature_enabled = false;
    4160             : 
    4161           0 :         dm->display_indexes_num = dm->dc->caps.max_streams;
     4162             :         /* Update the actual number of crtcs used */
    4163           0 :         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
    4164             : 
    4165           0 :         link_cnt = dm->dc->caps.max_links;
    4166           0 :         if (amdgpu_dm_mode_config_init(dm->adev)) {
    4167           0 :                 DRM_ERROR("DM: Failed to initialize mode config\n");
    4168           0 :                 return -EINVAL;
    4169             :         }
    4170             : 
    4171             :         /* There is one primary plane per CRTC */
    4172           0 :         primary_planes = dm->dc->caps.max_streams;
    4173           0 :         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
    4174             : 
    4175             :         /*
    4176             :          * Initialize primary planes, implicit planes for legacy IOCTLS.
    4177             :          * Order is reversed to match iteration order in atomic check.
    4178             :          */
    4179           0 :         for (i = (primary_planes - 1); i >= 0; i--) {
    4180           0 :                 plane = &dm->dc->caps.planes[i];
    4181             : 
    4182           0 :                 if (initialize_plane(dm, mode_info, i,
    4183             :                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
    4184           0 :                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
    4185           0 :                         goto fail;
    4186             :                 }
    4187             :         }
    4188             : 
    4189             :         /*
    4190             :          * Initialize overlay planes, index starting after primary planes.
    4191             :          * These planes have a higher DRM index than the primary planes since
    4192             :          * they should be considered as having a higher z-order.
    4193             :          * Order is reversed to match iteration order in atomic check.
    4194             :          *
    4195             :          * Only support DCN for now, and only expose one so we don't encourage
    4196             :          * userspace to use up all the pipes.
    4197             :          */
    4198           0 :         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
    4199           0 :                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
    4200             : 
    4201             :                 /* Do not create overlay if MPO disabled */
    4202           0 :                 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
    4203             :                         break;
    4204             : 
    4205           0 :                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
    4206           0 :                         continue;
    4207             : 
    4208           0 :                 if (!plane->blends_with_above || !plane->blends_with_below)
    4209           0 :                         continue;
    4210             : 
    4211           0 :                 if (!plane->pixel_format_support.argb8888)
    4212           0 :                         continue;
    4213             : 
    4214           0 :                 if (initialize_plane(dm, NULL, primary_planes + i,
    4215             :                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
    4216           0 :                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
    4217           0 :                         goto fail;
    4218             :                 }
    4219             : 
    4220             :                 /* Only create one overlay plane. */
    4221             :                 break;
    4222             :         }
    4223             : 
    4224           0 :         for (i = 0; i < dm->dc->caps.max_streams; i++)
    4225           0 :                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
    4226           0 :                         DRM_ERROR("KMS: Failed to initialize crtc\n");
    4227           0 :                         goto fail;
    4228             :                 }
    4229             : 
    4230             :         /* Use Outbox interrupt */
    4231           0 :         switch (adev->ip_versions[DCE_HWIP][0]) {
    4232             :         case IP_VERSION(3, 0, 0):
    4233             :         case IP_VERSION(3, 1, 2):
    4234             :         case IP_VERSION(3, 1, 3):
    4235             :         case IP_VERSION(3, 1, 4):
    4236             :         case IP_VERSION(3, 1, 5):
    4237             :         case IP_VERSION(3, 1, 6):
    4238             :         case IP_VERSION(3, 2, 0):
    4239             :         case IP_VERSION(3, 2, 1):
    4240             :         case IP_VERSION(2, 1, 0):
    4241           0 :                 if (register_outbox_irq_handlers(dm->adev)) {
    4242           0 :                         DRM_ERROR("DM: Failed to initialize IRQ\n");
    4243           0 :                         goto fail;
    4244             :                 }
    4245             :                 break;
    4246             :         default:
    4247           0 :                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
    4248             :                               adev->ip_versions[DCE_HWIP][0]);
    4249             :         }
    4250             : 
    4251             :         /* Determine whether to enable PSR support by default. */
    4252           0 :         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
    4253           0 :                 switch (adev->ip_versions[DCE_HWIP][0]) {
    4254             :                 case IP_VERSION(3, 1, 2):
    4255             :                 case IP_VERSION(3, 1, 3):
    4256             :                 case IP_VERSION(3, 1, 4):
    4257             :                 case IP_VERSION(3, 1, 5):
    4258             :                 case IP_VERSION(3, 1, 6):
    4259             :                 case IP_VERSION(3, 2, 0):
    4260             :                 case IP_VERSION(3, 2, 1):
    4261             :                         psr_feature_enabled = true;
    4262             :                         break;
    4263             :                 default:
    4264           0 :                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
    4265           0 :                         break;
    4266             :                 }
    4267             :         }
    4268             : 
    4269             :         /* loops over all connectors on the board */
    4270           0 :         for (i = 0; i < link_cnt; i++) {
    4271           0 :                 struct dc_link *link = NULL;
    4272             : 
    4273           0 :                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
    4274           0 :                         DRM_ERROR(
    4275             :                                 "KMS: Cannot support more than %d display indexes\n",
    4276             :                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
    4277           0 :                         continue;
    4278             :                 }
    4279             : 
    4280           0 :                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
    4281           0 :                 if (!aconnector)
    4282             :                         goto fail;
    4283             : 
    4284           0 :                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
    4285           0 :                 if (!aencoder)
    4286             :                         goto fail;
    4287             : 
    4288           0 :                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
    4289           0 :                         DRM_ERROR("KMS: Failed to initialize encoder\n");
    4290           0 :                         goto fail;
    4291             :                 }
    4292             : 
    4293           0 :                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
    4294           0 :                         DRM_ERROR("KMS: Failed to initialize connector\n");
    4295           0 :                         goto fail;
    4296             :                 }
    4297             : 
    4298           0 :                 link = dc_get_link_at_index(dm->dc, i);
    4299             : 
    4300           0 :                 if (!dc_link_detect_sink(link, &new_connection_type))
    4301           0 :                         DRM_ERROR("KMS: Failed to detect connector\n");
    4302             : 
    4303           0 :                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
    4304           0 :                         emulated_link_detect(link);
    4305           0 :                         amdgpu_dm_update_connector_after_detect(aconnector);
    4306             :                 } else {
    4307           0 :                         bool ret = false;
    4308             : 
    4309           0 :                         mutex_lock(&dm->dc_lock);
    4310           0 :                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
    4311           0 :                         mutex_unlock(&dm->dc_lock);
    4312             : 
    4313           0 :                         if (ret) {
    4314           0 :                                 amdgpu_dm_update_connector_after_detect(aconnector);
    4315           0 :                                 register_backlight_device(dm, link);
    4316             : 
    4317           0 :                                 if (dm->num_of_edps)
    4318           0 :                                         update_connector_ext_caps(aconnector);
    4319             : 
    4320           0 :                                 if (psr_feature_enabled)
    4321           0 :                                         amdgpu_dm_set_psr_caps(link);
    4322             : 
    4323             :                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
    4324             :                                  * PSR is also supported.
    4325             :                                  */
    4326           0 :                                 if (link->psr_settings.psr_feature_enabled)
    4327           0 :                                         adev_to_drm(adev)->vblank_disable_immediate = false;
    4328             :                         }
    4329             :                 }
    4330           0 :                 amdgpu_set_panel_orientation(&aconnector->base);
    4331             :         }
    4332             : 
    4333             :         /* Software is initialized. Now we can register interrupt handlers. */
    4334           0 :         switch (adev->asic_type) {
    4335             : #if defined(CONFIG_DRM_AMD_DC_SI)
    4336             :         case CHIP_TAHITI:
    4337             :         case CHIP_PITCAIRN:
    4338             :         case CHIP_VERDE:
    4339             :         case CHIP_OLAND:
    4340             :                 if (dce60_register_irq_handlers(dm->adev)) {
    4341             :                         DRM_ERROR("DM: Failed to initialize IRQ\n");
    4342             :                         goto fail;
    4343             :                 }
    4344             :                 break;
    4345             : #endif
    4346             :         case CHIP_BONAIRE:
    4347             :         case CHIP_HAWAII:
    4348             :         case CHIP_KAVERI:
    4349             :         case CHIP_KABINI:
    4350             :         case CHIP_MULLINS:
    4351             :         case CHIP_TONGA:
    4352             :         case CHIP_FIJI:
    4353             :         case CHIP_CARRIZO:
    4354             :         case CHIP_STONEY:
    4355             :         case CHIP_POLARIS11:
    4356             :         case CHIP_POLARIS10:
    4357             :         case CHIP_POLARIS12:
    4358             :         case CHIP_VEGAM:
    4359             :         case CHIP_VEGA10:
    4360             :         case CHIP_VEGA12:
    4361             :         case CHIP_VEGA20:
    4362           0 :                 if (dce110_register_irq_handlers(dm->adev)) {
    4363           0 :                         DRM_ERROR("DM: Failed to initialize IRQ\n");
    4364           0 :                         goto fail;
    4365             :                 }
    4366             :                 break;
    4367             :         default:
    4368           0 :                 switch (adev->ip_versions[DCE_HWIP][0]) {
    4369             :                 case IP_VERSION(1, 0, 0):
    4370             :                 case IP_VERSION(1, 0, 1):
    4371             :                 case IP_VERSION(2, 0, 2):
    4372             :                 case IP_VERSION(2, 0, 3):
    4373             :                 case IP_VERSION(2, 0, 0):
    4374             :                 case IP_VERSION(2, 1, 0):
    4375             :                 case IP_VERSION(3, 0, 0):
    4376             :                 case IP_VERSION(3, 0, 2):
    4377             :                 case IP_VERSION(3, 0, 3):
    4378             :                 case IP_VERSION(3, 0, 1):
    4379             :                 case IP_VERSION(3, 1, 2):
    4380             :                 case IP_VERSION(3, 1, 3):
    4381             :                 case IP_VERSION(3, 1, 4):
    4382             :                 case IP_VERSION(3, 1, 5):
    4383             :                 case IP_VERSION(3, 1, 6):
    4384             :                 case IP_VERSION(3, 2, 0):
    4385             :                 case IP_VERSION(3, 2, 1):
    4386           0 :                         if (dcn10_register_irq_handlers(dm->adev)) {
    4387           0 :                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
    4388           0 :                                 goto fail;
    4389             :                         }
    4390             :                         break;
    4391             :                 default:
    4392           0 :                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
    4393             :                                         adev->ip_versions[DCE_HWIP][0]);
    4394           0 :                         goto fail;
    4395             :                 }
    4396             :                 break;
    4397             :         }
    4398             : 
    4399             :         return 0;
    4400             : fail:
    4401           0 :         kfree(aencoder);
    4402           0 :         kfree(aconnector);
    4403             : 
    4404           0 :         return -EINVAL;
    4405             : }
    4406             : 
    4407             : static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
    4408             : {
    4409           0 :         drm_atomic_private_obj_fini(&dm->atomic_obj);
    4410             :         return;
    4411             : }
    4412             : 
    4413             : /******************************************************************************
    4414             :  * amdgpu_display_funcs functions
    4415             :  *****************************************************************************/
    4416             : 
    4417             : /*
    4418             :  * dm_bandwidth_update - program display watermarks
    4419             :  *
    4420             :  * @adev: amdgpu_device pointer
    4421             :  *
    4422             :  * Calculate and program the display watermarks and line buffer allocation.
    4423             :  */
    4424           0 : static void dm_bandwidth_update(struct amdgpu_device *adev)
    4425             : {
    4426             :         /* TODO: implement later */
    4427           0 : }
    4428             : 
    4429             : static const struct amdgpu_display_funcs dm_display_funcs = {
    4430             :         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
    4431             :         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
    4432             :         .backlight_set_level = NULL, /* never called for DC */
    4433             :         .backlight_get_level = NULL, /* never called for DC */
    4434             :         .hpd_sense = NULL,/* called unconditionally */
    4435             :         .hpd_set_polarity = NULL, /* called unconditionally */
    4436             :         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
    4437             :         .page_flip_get_scanoutpos =
    4438             :                 dm_crtc_get_scanoutpos,/* called unconditionally */
    4439             :         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
    4440             :         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
    4441             : };
    4442             : 
    4443             : #if defined(CONFIG_DEBUG_KERNEL_DC)
    4444             : 
    4445             : static ssize_t s3_debug_store(struct device *device,
    4446             :                               struct device_attribute *attr,
    4447             :                               const char *buf,
    4448             :                               size_t count)
    4449             : {
    4450             :         int ret;
    4451             :         int s3_state;
    4452             :         struct drm_device *drm_dev = dev_get_drvdata(device);
    4453             :         struct amdgpu_device *adev = drm_to_adev(drm_dev);
    4454             : 
    4455             :         ret = kstrtoint(buf, 0, &s3_state);
    4456             : 
    4457             :         if (ret == 0) {
    4458             :                 if (s3_state) {
    4459             :                         dm_resume(adev);
    4460             :                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
    4461             :                 } else
    4462             :                         dm_suspend(adev);
    4463             :         }
    4464             : 
    4465             :         return ret == 0 ? count : 0;
    4466             : }
    4467             : 
    4468             : DEVICE_ATTR_WO(s3_debug);
    4469             : 
    4470             : #endif
    4471             : 
    4472           0 : static int dm_early_init(void *handle)
    4473             : {
    4474           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    4475             : 
    4476           0 :         switch (adev->asic_type) {
    4477             : #if defined(CONFIG_DRM_AMD_DC_SI)
    4478             :         case CHIP_TAHITI:
    4479             :         case CHIP_PITCAIRN:
    4480             :         case CHIP_VERDE:
    4481             :                 adev->mode_info.num_crtc = 6;
    4482             :                 adev->mode_info.num_hpd = 6;
    4483             :                 adev->mode_info.num_dig = 6;
    4484             :                 break;
    4485             :         case CHIP_OLAND:
    4486             :                 adev->mode_info.num_crtc = 2;
    4487             :                 adev->mode_info.num_hpd = 2;
    4488             :                 adev->mode_info.num_dig = 2;
    4489             :                 break;
    4490             : #endif
    4491             :         case CHIP_BONAIRE:
    4492             :         case CHIP_HAWAII:
    4493           0 :                 adev->mode_info.num_crtc = 6;
    4494           0 :                 adev->mode_info.num_hpd = 6;
    4495           0 :                 adev->mode_info.num_dig = 6;
    4496           0 :                 break;
    4497             :         case CHIP_KAVERI:
    4498           0 :                 adev->mode_info.num_crtc = 4;
    4499           0 :                 adev->mode_info.num_hpd = 6;
    4500           0 :                 adev->mode_info.num_dig = 7;
    4501           0 :                 break;
    4502             :         case CHIP_KABINI:
    4503             :         case CHIP_MULLINS:
    4504           0 :                 adev->mode_info.num_crtc = 2;
    4505           0 :                 adev->mode_info.num_hpd = 6;
    4506           0 :                 adev->mode_info.num_dig = 6;
    4507           0 :                 break;
    4508             :         case CHIP_FIJI:
    4509             :         case CHIP_TONGA:
    4510           0 :                 adev->mode_info.num_crtc = 6;
    4511           0 :                 adev->mode_info.num_hpd = 6;
    4512           0 :                 adev->mode_info.num_dig = 7;
    4513           0 :                 break;
    4514             :         case CHIP_CARRIZO:
    4515           0 :                 adev->mode_info.num_crtc = 3;
    4516           0 :                 adev->mode_info.num_hpd = 6;
    4517           0 :                 adev->mode_info.num_dig = 9;
    4518           0 :                 break;
    4519             :         case CHIP_STONEY:
    4520           0 :                 adev->mode_info.num_crtc = 2;
    4521           0 :                 adev->mode_info.num_hpd = 6;
    4522           0 :                 adev->mode_info.num_dig = 9;
    4523           0 :                 break;
    4524             :         case CHIP_POLARIS11:
    4525             :         case CHIP_POLARIS12:
    4526           0 :                 adev->mode_info.num_crtc = 5;
    4527           0 :                 adev->mode_info.num_hpd = 5;
    4528           0 :                 adev->mode_info.num_dig = 5;
    4529           0 :                 break;
    4530             :         case CHIP_POLARIS10:
    4531             :         case CHIP_VEGAM:
    4532           0 :                 adev->mode_info.num_crtc = 6;
    4533           0 :                 adev->mode_info.num_hpd = 6;
    4534           0 :                 adev->mode_info.num_dig = 6;
    4535           0 :                 break;
    4536             :         case CHIP_VEGA10:
    4537             :         case CHIP_VEGA12:
    4538             :         case CHIP_VEGA20:
    4539           0 :                 adev->mode_info.num_crtc = 6;
    4540           0 :                 adev->mode_info.num_hpd = 6;
    4541           0 :                 adev->mode_info.num_dig = 6;
    4542           0 :                 break;
    4543             :         default:
    4544             : 
    4545           0 :                 switch (adev->ip_versions[DCE_HWIP][0]) {
    4546             :                 case IP_VERSION(2, 0, 2):
    4547             :                 case IP_VERSION(3, 0, 0):
    4548           0 :                         adev->mode_info.num_crtc = 6;
    4549           0 :                         adev->mode_info.num_hpd = 6;
    4550           0 :                         adev->mode_info.num_dig = 6;
    4551           0 :                         break;
    4552             :                 case IP_VERSION(2, 0, 0):
    4553             :                 case IP_VERSION(3, 0, 2):
    4554           0 :                         adev->mode_info.num_crtc = 5;
    4555           0 :                         adev->mode_info.num_hpd = 5;
    4556           0 :                         adev->mode_info.num_dig = 5;
    4557           0 :                         break;
    4558             :                 case IP_VERSION(2, 0, 3):
    4559             :                 case IP_VERSION(3, 0, 3):
    4560           0 :                         adev->mode_info.num_crtc = 2;
    4561           0 :                         adev->mode_info.num_hpd = 2;
    4562           0 :                         adev->mode_info.num_dig = 2;
    4563           0 :                         break;
    4564             :                 case IP_VERSION(1, 0, 0):
    4565             :                 case IP_VERSION(1, 0, 1):
    4566             :                 case IP_VERSION(3, 0, 1):
    4567             :                 case IP_VERSION(2, 1, 0):
    4568             :                 case IP_VERSION(3, 1, 2):
    4569             :                 case IP_VERSION(3, 1, 3):
    4570             :                 case IP_VERSION(3, 1, 4):
    4571             :                 case IP_VERSION(3, 1, 5):
    4572             :                 case IP_VERSION(3, 1, 6):
    4573             :                 case IP_VERSION(3, 2, 0):
    4574             :                 case IP_VERSION(3, 2, 1):
    4575           0 :                         adev->mode_info.num_crtc = 4;
    4576           0 :                         adev->mode_info.num_hpd = 4;
    4577           0 :                         adev->mode_info.num_dig = 4;
    4578           0 :                         break;
    4579             :                 default:
    4580           0 :                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
    4581             :                                         adev->ip_versions[DCE_HWIP][0]);
    4582           0 :                         return -EINVAL;
    4583             :                 }
    4584             :                 break;
    4585             :         }
    4586             : 
    4587           0 :         amdgpu_dm_set_irq_funcs(adev);
    4588             : 
    4589           0 :         if (adev->mode_info.funcs == NULL)
    4590           0 :                 adev->mode_info.funcs = &dm_display_funcs;
    4591             : 
    4592             :         /*
    4593             :          * Note: Do NOT change adev->audio_endpt_rreg and
    4594             :          * adev->audio_endpt_wreg because they are initialised in
    4595             :          * amdgpu_device_init()
    4596             :          */
    4597             : #if defined(CONFIG_DEBUG_KERNEL_DC)
    4598             :         device_create_file(
    4599             :                 adev_to_drm(adev)->dev,
    4600             :                 &dev_attr_s3_debug);
    4601             : #endif
    4602             : 
    4603             :         return 0;
    4604             : }
    4605             : 
    4606             : static bool modereset_required(struct drm_crtc_state *crtc_state)
    4607             : {
    4608           0 :         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
    4609             : }
    4610             : 
    4611           0 : static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
    4612             : {
    4613           0 :         drm_encoder_cleanup(encoder);
    4614           0 :         kfree(encoder);
    4615           0 : }
    4616             : 
    4617             : static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
    4618             :         .destroy = amdgpu_dm_encoder_destroy,
    4619             : };
    4620             : 
    4621             : static int
    4622             : fill_plane_color_attributes(const struct drm_plane_state *plane_state,
    4623             :                             const enum surface_pixel_format format,
    4624             :                             enum dc_color_space *color_space)
    4625             : {
    4626             :         bool full_range;
    4627             : 
    4628           0 :         *color_space = COLOR_SPACE_SRGB;
    4629             : 
    4630             :         /* DRM color properties only affect non-RGB formats. */
    4631           0 :         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
    4632             :                 return 0;
    4633             : 
    4634           0 :         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
    4635             : 
    4636           0 :         switch (plane_state->color_encoding) {
    4637             :         case DRM_COLOR_YCBCR_BT601:
    4638           0 :                 if (full_range)
    4639           0 :                         *color_space = COLOR_SPACE_YCBCR601;
    4640             :                 else
    4641           0 :                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
    4642             :                 break;
    4643             : 
    4644             :         case DRM_COLOR_YCBCR_BT709:
    4645           0 :                 if (full_range)
    4646           0 :                         *color_space = COLOR_SPACE_YCBCR709;
    4647             :                 else
    4648           0 :                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
    4649             :                 break;
    4650             : 
    4651             :         case DRM_COLOR_YCBCR_BT2020:
    4652           0 :                 if (full_range)
    4653           0 :                         *color_space = COLOR_SPACE_2020_YCBCR;
    4654             :                 else
    4655             :                         return -EINVAL;
    4656             :                 break;
    4657             : 
    4658             :         default:
    4659             :                 return -EINVAL;
    4660             :         }
    4661             : 
    4662             :         return 0;
    4663             : }
    4664             : 
    4665             : static int
    4666           0 : fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
    4667             :                             const struct drm_plane_state *plane_state,
    4668             :                             const uint64_t tiling_flags,
    4669             :                             struct dc_plane_info *plane_info,
    4670             :                             struct dc_plane_address *address,
    4671             :                             bool tmz_surface,
    4672             :                             bool force_disable_dcc)
    4673             : {
    4674           0 :         const struct drm_framebuffer *fb = plane_state->fb;
    4675           0 :         const struct amdgpu_framebuffer *afb =
    4676           0 :                 to_amdgpu_framebuffer(plane_state->fb);
    4677             :         int ret;
    4678             : 
    4679           0 :         memset(plane_info, 0, sizeof(*plane_info));
    4680             : 
    4681           0 :         switch (fb->format->format) {
    4682             :         case DRM_FORMAT_C8:
    4683           0 :                 plane_info->format =
    4684             :                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
    4685           0 :                 break;
    4686             :         case DRM_FORMAT_RGB565:
    4687           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
    4688           0 :                 break;
    4689             :         case DRM_FORMAT_XRGB8888:
    4690             :         case DRM_FORMAT_ARGB8888:
    4691           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
    4692           0 :                 break;
    4693             :         case DRM_FORMAT_XRGB2101010:
    4694             :         case DRM_FORMAT_ARGB2101010:
    4695           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
    4696           0 :                 break;
    4697             :         case DRM_FORMAT_XBGR2101010:
    4698             :         case DRM_FORMAT_ABGR2101010:
    4699           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
    4700           0 :                 break;
    4701             :         case DRM_FORMAT_XBGR8888:
    4702             :         case DRM_FORMAT_ABGR8888:
    4703           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
    4704           0 :                 break;
    4705             :         case DRM_FORMAT_NV21:
    4706           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
    4707           0 :                 break;
    4708             :         case DRM_FORMAT_NV12:
    4709           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
    4710           0 :                 break;
    4711             :         case DRM_FORMAT_P010:
    4712           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
    4713           0 :                 break;
    4714             :         case DRM_FORMAT_XRGB16161616F:
    4715             :         case DRM_FORMAT_ARGB16161616F:
    4716           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
    4717           0 :                 break;
    4718             :         case DRM_FORMAT_XBGR16161616F:
    4719             :         case DRM_FORMAT_ABGR16161616F:
    4720           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
    4721           0 :                 break;
    4722             :         case DRM_FORMAT_XRGB16161616:
    4723             :         case DRM_FORMAT_ARGB16161616:
    4724           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
    4725           0 :                 break;
    4726             :         case DRM_FORMAT_XBGR16161616:
    4727             :         case DRM_FORMAT_ABGR16161616:
    4728           0 :                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
    4729           0 :                 break;
    4730             :         default:
    4731           0 :                 DRM_ERROR(
    4732             :                         "Unsupported screen format %p4cc\n",
    4733             :                         &fb->format->format);
    4734           0 :                 return -EINVAL;
    4735             :         }
    4736             : 
    4737           0 :         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
    4738             :         case DRM_MODE_ROTATE_0:
    4739           0 :                 plane_info->rotation = ROTATION_ANGLE_0;
    4740           0 :                 break;
    4741             :         case DRM_MODE_ROTATE_90:
    4742           0 :                 plane_info->rotation = ROTATION_ANGLE_90;
    4743           0 :                 break;
    4744             :         case DRM_MODE_ROTATE_180:
    4745           0 :                 plane_info->rotation = ROTATION_ANGLE_180;
    4746           0 :                 break;
    4747             :         case DRM_MODE_ROTATE_270:
    4748           0 :                 plane_info->rotation = ROTATION_ANGLE_270;
    4749           0 :                 break;
    4750             :         default:
    4751           0 :                 plane_info->rotation = ROTATION_ANGLE_0;
    4752           0 :                 break;
    4753             :         }
    4754             : 
    4755             : 
    4756           0 :         plane_info->visible = true;
    4757           0 :         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
    4758             : 
    4759           0 :         plane_info->layer_index = 0;
    4760             : 
    4761           0 :         ret = fill_plane_color_attributes(plane_state, plane_info->format,
    4762             :                                           &plane_info->color_space);
    4763           0 :         if (ret)
    4764             :                 return ret;
    4765             : 
    4766           0 :         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
    4767             :                                            plane_info->rotation, tiling_flags,
    4768             :                                            &plane_info->tiling_info,
    4769             :                                            &plane_info->plane_size,
    4770             :                                            &plane_info->dcc, address,
    4771             :                                            tmz_surface, force_disable_dcc);
    4772           0 :         if (ret)
    4773             :                 return ret;
    4774             : 
    4775           0 :         fill_blending_from_plane_state(
    4776             :                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
    4777             :                 &plane_info->global_alpha, &plane_info->global_alpha_value);
    4778             : 
    4779           0 :         return 0;
    4780             : }
    4781             : 
    4782           0 : static int fill_dc_plane_attributes(struct amdgpu_device *adev,
    4783             :                                     struct dc_plane_state *dc_plane_state,
    4784             :                                     struct drm_plane_state *plane_state,
    4785             :                                     struct drm_crtc_state *crtc_state)
    4786             : {
    4787           0 :         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
    4788           0 :         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
    4789             :         struct dc_scaling_info scaling_info;
    4790             :         struct dc_plane_info plane_info;
    4791             :         int ret;
    4792           0 :         bool force_disable_dcc = false;
    4793             : 
    4794           0 :         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
    4795           0 :         if (ret)
    4796             :                 return ret;
    4797             : 
    4798           0 :         dc_plane_state->src_rect = scaling_info.src_rect;
    4799           0 :         dc_plane_state->dst_rect = scaling_info.dst_rect;
    4800           0 :         dc_plane_state->clip_rect = scaling_info.clip_rect;
    4801           0 :         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
    4802             : 
    4803           0 :         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
    4804           0 :         ret = fill_dc_plane_info_and_addr(adev, plane_state,
    4805             :                                           afb->tiling_flags,
    4806             :                                           &plane_info,
    4807             :                                           &dc_plane_state->address,
    4808           0 :                                           afb->tmz_surface,
    4809             :                                           force_disable_dcc);
    4810           0 :         if (ret)
    4811             :                 return ret;
    4812             : 
    4813           0 :         dc_plane_state->format = plane_info.format;
    4814           0 :         dc_plane_state->color_space = plane_info.color_space;
    4816           0 :         dc_plane_state->plane_size = plane_info.plane_size;
    4817           0 :         dc_plane_state->rotation = plane_info.rotation;
    4818           0 :         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
    4819           0 :         dc_plane_state->stereo_format = plane_info.stereo_format;
    4820           0 :         dc_plane_state->tiling_info = plane_info.tiling_info;
    4821           0 :         dc_plane_state->visible = plane_info.visible;
    4822           0 :         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
    4823           0 :         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
    4824           0 :         dc_plane_state->global_alpha = plane_info.global_alpha;
    4825           0 :         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
    4826           0 :         dc_plane_state->dcc = plane_info.dcc;
    4827           0 :         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
    4828           0 :         dc_plane_state->flip_int_enabled = true;
    4829             : 
    4830             :         /*
    4831             :          * Always set input transfer function, since plane state is refreshed
    4832             :          * every time.
    4833             :          */
    4834           0 :         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
    4835           0 :         if (ret)
    4836             :                 return ret;
    4837             : 
    4838           0 :         return 0;
    4839             : }
    4840             : 
    4841             : /**
    4842             :  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
    4843             :  *
    4844             :  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
    4845             :  *         remote fb
    4846             :  * @old_plane_state: Old state of @plane
    4847             :  * @new_plane_state: New state of @plane
    4848             :  * @crtc_state: New state of CRTC connected to the @plane
     4849             :  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
    4850             :  *
    4851             :  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
    4852             :  * (referred to as "damage clips" in DRM nomenclature) that require updating on
    4853             :  * the eDP remote buffer. The responsibility of specifying the dirty regions is
    4854             :  * amdgpu_dm's.
    4855             :  *
    4856             :  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
    4857             :  * plane with regions that require flushing to the eDP remote buffer. In
    4858             :  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
    4859             :  * implicitly provide damage clips without any client support via the plane
    4860             :  * bounds.
    4861             :  *
     4862             :  * Today, amdgpu_dm only supports the MPO and cursor use cases.
    4863             :  *
    4864             :  * TODO: Also enable for FB_DAMAGE_CLIPS
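                      :  *
                      :  * As an illustration only (hypothetical geometry), an MPO plane flipped
                      :  * to a new framebuffer and moved from (0, 0) to (128, 0) would report
                      :  * two dirty rects, the new plane bounds followed by the old ones:
                      :  *
                      :  *   dirty_rects[0] = (struct rect){ .x = 128, .y = 0, .width = w, .height = h };
                      :  *   dirty_rects[1] = (struct rect){ .x = 0,   .y = 0, .width = w, .height = h };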
    4865             :  */
    4866           0 : static void fill_dc_dirty_rects(struct drm_plane *plane,
    4867             :                                 struct drm_plane_state *old_plane_state,
    4868             :                                 struct drm_plane_state *new_plane_state,
    4869             :                                 struct drm_crtc_state *crtc_state,
    4870             :                                 struct dc_flip_addrs *flip_addrs)
    4871             : {
    4872           0 :         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
    4873           0 :         struct rect *dirty_rects = flip_addrs->dirty_rects;
    4874             :         uint32_t num_clips;
    4875             :         bool bb_changed;
    4876             :         bool fb_changed;
    4877           0 :         uint32_t i = 0;
    4878             : 
    4879           0 :         flip_addrs->dirty_rect_count = 0;
    4880             : 
    4881             :         /*
     4882             :          * Cursor plane has its own dirty rect update interface. See
    4883             :          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
    4884             :          */
    4885           0 :         if (plane->type == DRM_PLANE_TYPE_CURSOR)
    4886             :                 return;
    4887             : 
    4888             :         /*
     4889             :          * Today, we only consider the MPO use case for PSR SU. If MPO is not
     4890             :          * requested and there is a plane update, do a full-frame update (FFU).
    4891             :          */
    4892           0 :         if (!dm_crtc_state->mpo_requested) {
    4893           0 :                 dirty_rects[0].x = 0;
    4894           0 :                 dirty_rects[0].y = 0;
    4895           0 :                 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
    4896           0 :                 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
    4897           0 :                 flip_addrs->dirty_rect_count = 1;
    4898           0 :                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
    4899             :                                  new_plane_state->plane->base.id,
    4900             :                                  dm_crtc_state->base.mode.crtc_hdisplay,
    4901             :                                  dm_crtc_state->base.mode.crtc_vdisplay);
    4902             :                 return;
    4903             :         }
    4904             : 
    4905             :         /*
     4906             :          * MPO is requested. Add the entire plane bounding box to the dirty
     4907             :          * rects if the plane is flipped to or damaged.
    4908             :          *
    4909             :          * If plane is moved or resized, also add old bounding box to dirty
    4910             :          * rects.
    4911             :          */
    4912           0 :         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
    4913           0 :         fb_changed = old_plane_state->fb->base.id !=
    4914           0 :                      new_plane_state->fb->base.id;
    4915           0 :         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
    4916             :                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
    4917           0 :                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
    4918             :                       old_plane_state->crtc_h != new_plane_state->crtc_h);
    4919             : 
    4920           0 :         DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
    4921             :                          new_plane_state->plane->base.id,
    4922             :                          bb_changed, fb_changed, num_clips);
    4923             : 
    4924           0 :         if (num_clips || fb_changed || bb_changed) {
    4925           0 :                 dirty_rects[i].x = new_plane_state->crtc_x;
    4926           0 :                 dirty_rects[i].y = new_plane_state->crtc_y;
    4927           0 :                 dirty_rects[i].width = new_plane_state->crtc_w;
    4928           0 :                 dirty_rects[i].height = new_plane_state->crtc_h;
    4929           0 :                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
    4930             :                                  new_plane_state->plane->base.id,
    4931             :                                  dirty_rects[i].x, dirty_rects[i].y,
    4932             :                                  dirty_rects[i].width, dirty_rects[i].height);
    4933           0 :                 i += 1;
    4934             :         }
    4935             : 
    4936             :         /* Add old plane bounding-box if plane is moved or resized */
    4937           0 :         if (bb_changed) {
    4938           0 :                 dirty_rects[i].x = old_plane_state->crtc_x;
    4939           0 :                 dirty_rects[i].y = old_plane_state->crtc_y;
    4940           0 :                 dirty_rects[i].width = old_plane_state->crtc_w;
    4941           0 :                 dirty_rects[i].height = old_plane_state->crtc_h;
    4942           0 :                 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
    4943             :                                 old_plane_state->plane->base.id,
    4944             :                                 dirty_rects[i].x, dirty_rects[i].y,
    4945             :                                 dirty_rects[i].width, dirty_rects[i].height);
    4946           0 :                 i += 1;
    4947             :         }
    4948             : 
    4949           0 :         flip_addrs->dirty_rect_count = i;
    4950             : }
    4951             : 
    4952           0 : static void update_stream_scaling_settings(const struct drm_display_mode *mode,
    4953             :                                            const struct dm_connector_state *dm_state,
    4954             :                                            struct dc_stream_state *stream)
    4955             : {
    4956             :         enum amdgpu_rmx_type rmx_type;
    4957             : 
     4958           0 :         struct rect src = { 0 }; /* viewport in composition space */
    4959           0 :         struct rect dst = { 0 }; /* stream addressable area */
    4960             : 
    4961             :         /* no mode. nothing to be done */
    4962           0 :         if (!mode)
    4963             :                 return;
    4964             : 
    4965             :         /* Full screen scaling by default */
    4966           0 :         src.width = mode->hdisplay;
    4967           0 :         src.height = mode->vdisplay;
    4968           0 :         dst.width = stream->timing.h_addressable;
    4969           0 :         dst.height = stream->timing.v_addressable;
    4970             : 
    4971           0 :         if (dm_state) {
    4972           0 :                 rmx_type = dm_state->scaling;
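                      :                 /*
                      :                  * A worked example with assumed sizes: a 1280x1024 source on a
                      :                  * 1920x1080 panel with RMX_ASPECT keeps the 5:4 ratio, so dst
                      :                  * becomes 1350x1080 and is centered below at x = 285, y = 0.
                      :                  */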
    4973           0 :                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
    4974           0 :                         if (src.width * dst.height <
    4975           0 :                                         src.height * dst.width) {
    4976             :                                 /* height needs less upscaling/more downscaling */
    4977           0 :                                 dst.width = src.width *
    4978           0 :                                                 dst.height / src.height;
    4979             :                         } else {
    4980             :                                 /* width needs less upscaling/more downscaling */
    4981           0 :                                 dst.height = src.height *
    4982           0 :                                                 dst.width / src.width;
    4983             :                         }
    4984           0 :                 } else if (rmx_type == RMX_CENTER) {
    4985           0 :                         dst = src;
    4986             :                 }
    4987             : 
    4988           0 :                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
    4989           0 :                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
    4990             : 
    4991           0 :                 if (dm_state->underscan_enable) {
    4992           0 :                         dst.x += dm_state->underscan_hborder / 2;
    4993           0 :                         dst.y += dm_state->underscan_vborder / 2;
    4994           0 :                         dst.width -= dm_state->underscan_hborder;
    4995           0 :                         dst.height -= dm_state->underscan_vborder;
    4996             :                 }
    4997             :         }
    4998             : 
    4999           0 :         stream->src = src;
    5000           0 :         stream->dst = dst;
    5001             : 
    5002           0 :         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
    5003             :                       dst.x, dst.y, dst.width, dst.height);
    5004             : 
    5005             : }
    5006             : 
    5007             : static enum dc_color_depth
    5008           0 : convert_color_depth_from_display_info(const struct drm_connector *connector,
    5009             :                                       bool is_y420, int requested_bpc)
    5010             : {
    5011             :         uint8_t bpc;
    5012             : 
    5013           0 :         if (is_y420) {
    5014           0 :                 bpc = 8;
    5015             : 
    5016             :                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
    5017           0 :                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
    5018             :                         bpc = 16;
    5019           0 :                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
    5020             :                         bpc = 12;
    5021           0 :                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
    5022           0 :                         bpc = 10;
    5023             :         } else {
    5024           0 :                 bpc = (uint8_t)connector->display_info.bpc;
    5025             :                 /* Assume 8 bpc by default if no bpc is specified. */
    5026           0 :                 bpc = bpc ? bpc : 8;
    5027             :         }
    5028             : 
    5029           0 :         if (requested_bpc > 0) {
    5030             :                 /*
    5031             :                  * Cap display bpc based on the user requested value.
    5032             :                  *
     5033             :                  * The value for state->max_bpc may not be correctly updated
    5034             :                  * depending on when the connector gets added to the state
    5035             :                  * or if this was called outside of atomic check, so it
    5036             :                  * can't be used directly.
    5037             :                  */
    5038           0 :                 bpc = min_t(u8, bpc, requested_bpc);
    5039             : 
    5040             :                 /* Round down to the nearest even number. */
    5041           0 :                 bpc = bpc - (bpc & 1);
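                      :                 /*
                      :                  * e.g. a display reporting 12 bpc with a requested max of 10 is
                      :                  * driven at 10 bpc; an odd request of 11 also rounds down to 10.
                      :                  */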
    5042             :         }
    5043             : 
    5044             :         switch (bpc) {
    5045             :         case 0:
    5046             :                 /*
     5047             :                  * Temporary workaround: DRM doesn't parse color depth for
     5048             :                  * EDID revisions before 1.4.
    5049             :                  * TODO: Fix edid parsing
    5050             :                  */
    5051             :                 return COLOR_DEPTH_888;
    5052             :         case 6:
    5053             :                 return COLOR_DEPTH_666;
    5054             :         case 8:
    5055             :                 return COLOR_DEPTH_888;
    5056             :         case 10:
    5057             :                 return COLOR_DEPTH_101010;
    5058             :         case 12:
    5059             :                 return COLOR_DEPTH_121212;
    5060             :         case 14:
    5061             :                 return COLOR_DEPTH_141414;
    5062             :         case 16:
    5063             :                 return COLOR_DEPTH_161616;
    5064             :         default:
    5065             :                 return COLOR_DEPTH_UNDEFINED;
    5066             :         }
    5067             : }
    5068             : 
    5069             : static enum dc_aspect_ratio
    5070             : get_aspect_ratio(const struct drm_display_mode *mode_in)
    5071             : {
    5072             :         /* 1-1 mapping, since both enums follow the HDMI spec. */
    5073             :         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
    5074             : }
    5075             : 
    5076             : static enum dc_color_space
    5077           0 : get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
    5078             : {
    5079           0 :         enum dc_color_space color_space = COLOR_SPACE_SRGB;
    5080             : 
    5081           0 :         switch (dc_crtc_timing->pixel_encoding)      {
    5082             :         case PIXEL_ENCODING_YCBCR422:
    5083             :         case PIXEL_ENCODING_YCBCR444:
    5084             :         case PIXEL_ENCODING_YCBCR420:
    5085             :         {
    5086             :                 /*
     5087             :                  * 27.03 MHz is the separation point between HDTV and SDTV
     5088             :                  * according to the HDMI spec; use YCbCr709 above it and
     5089             :                  * YCbCr601 below it.
    5090             :                  */
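                      :                 /*
                      :                  * For example, 1080p60 (148.5 MHz) falls on the YCbCr709 side,
                      :                  * while 480p60 (27.0 MHz) falls on the YCbCr601 side.
                      :                  */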
    5091           0 :                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
    5092           0 :                         if (dc_crtc_timing->flags.Y_ONLY)
    5093             :                                 color_space =
    5094             :                                         COLOR_SPACE_YCBCR709_LIMITED;
    5095             :                         else
    5096           0 :                                 color_space = COLOR_SPACE_YCBCR709;
    5097             :                 } else {
    5098           0 :                         if (dc_crtc_timing->flags.Y_ONLY)
    5099             :                                 color_space =
    5100             :                                         COLOR_SPACE_YCBCR601_LIMITED;
    5101             :                         else
    5102           0 :                                 color_space = COLOR_SPACE_YCBCR601;
    5103             :                 }
    5104             : 
    5105             :         }
    5106             :         break;
    5107             :         case PIXEL_ENCODING_RGB:
    5108             :                 color_space = COLOR_SPACE_SRGB;
    5109             :                 break;
    5110             : 
    5111             :         default:
    5112           0 :                 WARN_ON(1);
    5113             :                 break;
    5114             :         }
    5115             : 
    5116           0 :         return color_space;
    5117             : }
    5118             : 
    5119           0 : static bool adjust_colour_depth_from_display_info(
    5120             :         struct dc_crtc_timing *timing_out,
    5121             :         const struct drm_display_info *info)
    5122             : {
    5123           0 :         enum dc_color_depth depth = timing_out->display_color_depth;
    5124             :         int normalized_clk;
    5125             :         do {
    5126           0 :                 normalized_clk = timing_out->pix_clk_100hz / 10;
    5127             :                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
    5128           0 :                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
    5129           0 :                         normalized_clk /= 2;
     5130             :                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
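                      :                 /*
                      :                  * Worked example with assumed numbers: 4K60 at YCbCr 4:2:0 and
                      :                  * 12 bpc gives 594000 kHz / 2 = 297000 kHz, then * 36 / 24 =
                      :                  * 445500 kHz, which fits a 600 MHz HDMI 2.0 TMDS limit but not a
                      :                  * 340 MHz HDMI 1.4 one, in which case the loop retries at 10 bpc.
                      :                  */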
    5131           0 :                 switch (depth) {
    5132             :                 case COLOR_DEPTH_888:
    5133             :                         break;
    5134             :                 case COLOR_DEPTH_101010:
    5135           0 :                         normalized_clk = (normalized_clk * 30) / 24;
    5136             :                         break;
    5137             :                 case COLOR_DEPTH_121212:
    5138           0 :                         normalized_clk = (normalized_clk * 36) / 24;
    5139             :                         break;
    5140             :                 case COLOR_DEPTH_161616:
    5141           0 :                         normalized_clk = (normalized_clk * 48) / 24;
    5142             :                         break;
    5143             :                 default:
    5144             :                         /* The above depths are the only ones valid for HDMI. */
    5145             :                         return false;
    5146             :                 }
    5147           0 :                 if (normalized_clk <= info->max_tmds_clock) {
    5148           0 :                         timing_out->display_color_depth = depth;
    5149             :                         return true;
    5150             :                 }
    5151           0 :         } while (--depth > COLOR_DEPTH_666);
    5152             :         return false;
    5153             : }
    5154             : 
    5155           0 : static void fill_stream_properties_from_drm_display_mode(
    5156             :         struct dc_stream_state *stream,
    5157             :         const struct drm_display_mode *mode_in,
    5158             :         const struct drm_connector *connector,
    5159             :         const struct drm_connector_state *connector_state,
    5160             :         const struct dc_stream_state *old_stream,
    5161             :         int requested_bpc)
    5162             : {
    5163           0 :         struct dc_crtc_timing *timing_out = &stream->timing;
    5164           0 :         const struct drm_display_info *info = &connector->display_info;
    5165           0 :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    5166             :         struct hdmi_vendor_infoframe hv_frame;
    5167             :         struct hdmi_avi_infoframe avi_frame;
    5168             : 
    5169           0 :         memset(&hv_frame, 0, sizeof(hv_frame));
    5170           0 :         memset(&avi_frame, 0, sizeof(avi_frame));
    5171             : 
    5172           0 :         timing_out->h_border_left = 0;
    5173           0 :         timing_out->h_border_right = 0;
    5174           0 :         timing_out->v_border_top = 0;
    5175           0 :         timing_out->v_border_bottom = 0;
    5176             :         /* TODO: un-hardcode */
    5177           0 :         if (drm_mode_is_420_only(info, mode_in)
    5178           0 :                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
    5179           0 :                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
    5180           0 :         else if (drm_mode_is_420_also(info, mode_in)
    5181           0 :                         && aconnector->force_yuv420_output)
    5182           0 :                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
    5183           0 :         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
    5184           0 :                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
    5185           0 :                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
    5186             :         else
    5187           0 :                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
    5188             : 
    5189           0 :         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
    5190           0 :         timing_out->display_color_depth = convert_color_depth_from_display_info(
    5191             :                 connector,
    5192           0 :                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
    5193             :                 requested_bpc);
    5194           0 :         timing_out->scan_type = SCANNING_TYPE_NODATA;
    5195           0 :         timing_out->hdmi_vic = 0;
    5196             : 
    5197           0 :         if (old_stream) {
    5198           0 :                 timing_out->vic = old_stream->timing.vic;
    5199           0 :                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
    5200           0 :                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
    5201             :         } else {
    5202           0 :                 timing_out->vic = drm_match_cea_mode(mode_in);
    5203           0 :                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
    5204           0 :                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
    5205           0 :                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
    5206           0 :                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
    5207             :         }
    5208             : 
    5209           0 :         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
    5210           0 :                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
    5211           0 :                 timing_out->vic = avi_frame.video_code;
    5212           0 :                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
    5213           0 :                 timing_out->hdmi_vic = hv_frame.vic;
    5214             :         }
    5215             : 
    5216           0 :         if (is_freesync_video_mode(mode_in, aconnector)) {
    5217           0 :                 timing_out->h_addressable = mode_in->hdisplay;
    5218           0 :                 timing_out->h_total = mode_in->htotal;
    5219           0 :                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
    5220           0 :                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
    5221           0 :                 timing_out->v_total = mode_in->vtotal;
    5222           0 :                 timing_out->v_addressable = mode_in->vdisplay;
    5223           0 :                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
    5224           0 :                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
    5225           0 :                 timing_out->pix_clk_100hz = mode_in->clock * 10;
    5226             :         } else {
    5227           0 :                 timing_out->h_addressable = mode_in->crtc_hdisplay;
    5228           0 :                 timing_out->h_total = mode_in->crtc_htotal;
    5229           0 :                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
    5230           0 :                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
    5231           0 :                 timing_out->v_total = mode_in->crtc_vtotal;
    5232           0 :                 timing_out->v_addressable = mode_in->crtc_vdisplay;
    5233           0 :                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
    5234           0 :                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
    5235           0 :                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
    5236             :         }
    5237             : 
    5238           0 :         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
    5239             : 
    5240           0 :         stream->output_color_space = get_output_color_space(timing_out);
    5241             : 
    5242           0 :         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
    5243           0 :         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
    5244           0 :         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
    5245           0 :                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
    5246           0 :                     drm_mode_is_420_also(info, mode_in) &&
    5247           0 :                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
    5248           0 :                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
    5249           0 :                         adjust_colour_depth_from_display_info(timing_out, info);
    5250             :                 }
    5251             :         }
    5252           0 : }
    5253             : 
    5254           0 : static void fill_audio_info(struct audio_info *audio_info,
    5255             :                             const struct drm_connector *drm_connector,
    5256             :                             const struct dc_sink *dc_sink)
    5257             : {
    5258           0 :         int i = 0;
    5259           0 :         int cea_revision = 0;
    5260           0 :         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
    5261             : 
    5262           0 :         audio_info->manufacture_id = edid_caps->manufacturer_id;
    5263           0 :         audio_info->product_id = edid_caps->product_id;
    5264             : 
    5265           0 :         cea_revision = drm_connector->display_info.cea_rev;
    5266             : 
    5267           0 :         strscpy(audio_info->display_name,
    5268           0 :                 edid_caps->display_name,
    5269             :                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
    5270             : 
    5271           0 :         if (cea_revision >= 3) {
    5272           0 :                 audio_info->mode_count = edid_caps->audio_mode_count;
    5273             : 
    5274           0 :                 for (i = 0; i < audio_info->mode_count; ++i) {
    5275           0 :                         audio_info->modes[i].format_code =
    5276           0 :                                         (enum audio_format_code)
    5277           0 :                                         (edid_caps->audio_modes[i].format_code);
    5278           0 :                         audio_info->modes[i].channel_count =
    5279           0 :                                         edid_caps->audio_modes[i].channel_count;
    5280           0 :                         audio_info->modes[i].sample_rates.all =
    5281           0 :                                         edid_caps->audio_modes[i].sample_rate;
    5282           0 :                         audio_info->modes[i].sample_size =
    5283           0 :                                         edid_caps->audio_modes[i].sample_size;
    5284             :                 }
    5285             :         }
    5286             : 
    5287           0 :         audio_info->flags.all = edid_caps->speaker_flags;
    5288             : 
     5289             :         /* TODO: We only check the progressive mode; check the interlace mode too */
    5290           0 :         if (drm_connector->latency_present[0]) {
    5291           0 :                 audio_info->video_latency = drm_connector->video_latency[0];
    5292           0 :                 audio_info->audio_latency = drm_connector->audio_latency[0];
    5293             :         }
    5294             : 
    5295             :         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
    5296             : 
    5297           0 : }
    5298             : 
    5299             : static void
    5300             : copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
    5301             :                                       struct drm_display_mode *dst_mode)
    5302             : {
    5303           0 :         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
    5304           0 :         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
    5305           0 :         dst_mode->crtc_clock = src_mode->crtc_clock;
    5306           0 :         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
    5307           0 :         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
    5308           0 :         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
    5309           0 :         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
    5310           0 :         dst_mode->crtc_htotal = src_mode->crtc_htotal;
    5311           0 :         dst_mode->crtc_hskew = src_mode->crtc_hskew;
    5312           0 :         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
    5313           0 :         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
    5314           0 :         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
    5315           0 :         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
    5316           0 :         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
    5317             : }
    5318             : 
    5319             : static void
    5320           0 : decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
    5321             :                                         const struct drm_display_mode *native_mode,
    5322             :                                         bool scale_enabled)
    5323             : {
    5324           0 :         if (scale_enabled) {
    5325             :                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
    5326           0 :         } else if (native_mode->clock == drm_mode->clock &&
    5327           0 :                         native_mode->htotal == drm_mode->htotal &&
    5328           0 :                         native_mode->vtotal == drm_mode->vtotal) {
    5329             :                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
    5330             :         } else {
     5331             :                 /* no scaling and no amdgpu-inserted mode; no need to patch */
    5332             :         }
    5333           0 : }
    5334             : 
    5335             : static struct dc_sink *
    5336           0 : create_fake_sink(struct amdgpu_dm_connector *aconnector)
    5337             : {
    5338           0 :         struct dc_sink_init_data sink_init_data = { 0 };
    5339           0 :         struct dc_sink *sink = NULL;
    5340           0 :         sink_init_data.link = aconnector->dc_link;
    5341           0 :         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
    5342             : 
    5343           0 :         sink = dc_sink_create(&sink_init_data);
    5344           0 :         if (!sink) {
    5345           0 :                 DRM_ERROR("Failed to create sink!\n");
    5346             :                 return NULL;
    5347             :         }
    5348           0 :         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
    5349             : 
    5350             :         return sink;
    5351             : }
    5352             : 
    5353             : static void set_multisync_trigger_params(
    5354             :                 struct dc_stream_state *stream)
    5355             : {
    5356           0 :         struct dc_stream_state *master = NULL;
    5357             : 
    5358           0 :         if (stream->triggered_crtc_reset.enabled) {
    5359           0 :                 master = stream->triggered_crtc_reset.event_source;
    5360           0 :                 stream->triggered_crtc_reset.event =
    5361           0 :                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
    5362           0 :                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
    5363           0 :                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
    5364             :         }
    5365             : }
    5366             : 
    5367           0 : static void set_master_stream(struct dc_stream_state *stream_set[],
    5368             :                               int stream_count)
    5369             : {
    5370           0 :         int j, highest_rfr = 0, master_stream = 0;
    5371             : 
    5372           0 :         for (j = 0;  j < stream_count; j++) {
    5373           0 :                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
    5374           0 :                         int refresh_rate = 0;
    5375             : 
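                      :                         /* refresh rate in Hz = pixel clock / (h_total * v_total) */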
    5376           0 :                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
    5377           0 :                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
    5378           0 :                         if (refresh_rate > highest_rfr) {
    5379           0 :                                 highest_rfr = refresh_rate;
    5380           0 :                                 master_stream = j;
    5381             :                         }
    5382             :                 }
    5383             :         }
    5384           0 :         for (j = 0;  j < stream_count; j++) {
    5385           0 :                 if (stream_set[j])
    5386           0 :                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
    5387             :         }
    5388           0 : }
    5389             : 
    5390           0 : static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
    5391             : {
    5392           0 :         int i = 0;
    5393             :         struct dc_stream_state *stream;
    5394             : 
    5395           0 :         if (context->stream_count < 2)
    5396             :                 return;
    5397           0 :         for (i = 0; i < context->stream_count ; i++) {
    5398             :                 if (!context->streams[i])
    5399             :                         continue;
    5400             :                 /*
    5401             :                  * TODO: add a function to read AMD VSDB bits and set
    5402             :                  * crtc_sync_master.multi_sync_enabled flag
    5403             :                  * For now it's set to false
    5404             :                  */
    5405             :         }
    5406             : 
    5407           0 :         set_master_stream(context->streams, context->stream_count);
    5408             : 
    5409           0 :         for (i = 0; i < context->stream_count ; i++) {
    5410           0 :                 stream = context->streams[i];
    5411             : 
    5412           0 :                 if (!stream)
    5413           0 :                         continue;
    5414             : 
    5415             :                 set_multisync_trigger_params(stream);
    5416             :         }
    5417             : }
    5418             : 
    5419             : /**
    5420             :  * DOC: FreeSync Video
    5421             :  *
    5422             :  * When a userspace application wants to play a video, the content follows a
    5423             :  * standard format definition that usually specifies the FPS for that format.
     5424             :  * The list below illustrates some common video formats and their
     5425             :  * expected FPS:
    5426             :  *
    5427             :  * - TV/NTSC (23.976 FPS)
    5428             :  * - Cinema (24 FPS)
    5429             :  * - TV/PAL (25 FPS)
    5430             :  * - TV/NTSC (29.97 FPS)
    5431             :  * - TV/NTSC (30 FPS)
    5432             :  * - Cinema HFR (48 FPS)
    5433             :  * - TV/PAL (50 FPS)
    5434             :  * - Commonly used (60 FPS)
    5435             :  * - Multiples of 24 (48,72,96 FPS)
    5436             :  *
     5437             :  * The list of standard video formats is not huge and can be added to the
     5438             :  * connector's modeset list beforehand. With that, userspace can leverage
     5439             :  * FreeSync to extend the front porch in order to attain the target refresh
    5440             :  * rate. Such a switch will happen seamlessly, without screen blanking or
    5441             :  * reprogramming of the output in any other way. If the userspace requests a
    5442             :  * modesetting change compatible with FreeSync modes that only differ in the
    5443             :  * refresh rate, DC will skip the full update and avoid blink during the
    5444             :  * transition. For example, the video player can change the modesetting from
    5445             :  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
    5446             :  * causing any display blink. This same concept can be applied to a mode
    5447             :  * setting change.
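                      :  *
                      :  * As a rough worked example (assumed numbers): a 60 Hz base mode with
                      :  * htotal = 2200, vtotal = 1125 and a 148.5 MHz pixel clock refreshes at
                      :  * 148500000 / (2200 * 1125) = 60 Hz. Keeping the pixel clock and htotal
                      :  * fixed, stretching vtotal (i.e. the vertical front porch) to about 1406
                      :  * lines yields 148500000 / (2200 * 1406) ~= 48 Hz, matching 48 FPS
                      :  * content without a full modeset.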
    5448             :  */
    5449             : static struct drm_display_mode *
    5450           0 : get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
    5451             :                 bool use_probed_modes)
    5452             : {
    5453           0 :         struct drm_display_mode *m, *m_pref = NULL;
    5454             :         u16 current_refresh, highest_refresh;
    5455           0 :         struct list_head *list_head = use_probed_modes ?
    5456           0 :                 &aconnector->base.probed_modes :
    5457             :                 &aconnector->base.modes;
    5458             : 
    5459           0 :         if (aconnector->freesync_vid_base.clock != 0)
    5460           0 :                 return &aconnector->freesync_vid_base;
    5461             : 
    5462             :         /* Find the preferred mode */
    5463           0 :         list_for_each_entry (m, list_head, head) {
    5464           0 :                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
    5465             :                         m_pref = m;
    5466             :                         break;
    5467             :                 }
    5468             :         }
    5469             : 
    5470           0 :         if (!m_pref) {
     5471             :                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
    5472           0 :                 m_pref = list_first_entry_or_null(
    5473             :                                 &aconnector->base.modes, struct drm_display_mode, head);
    5474           0 :                 if (!m_pref) {
    5475           0 :                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
    5476           0 :                         return NULL;
    5477             :                 }
    5478             :         }
    5479             : 
    5480           0 :         highest_refresh = drm_mode_vrefresh(m_pref);
    5481             : 
    5482             :         /*
     5483             :          * Find the mode with the highest refresh rate at the same resolution.
    5484             :          * For some monitors, preferred mode is not the mode with highest
    5485             :          * supported refresh rate.
    5486             :          */
    5487           0 :         list_for_each_entry (m, list_head, head) {
    5488           0 :                 current_refresh  = drm_mode_vrefresh(m);
    5489             : 
    5490           0 :                 if (m->hdisplay == m_pref->hdisplay &&
    5491           0 :                     m->vdisplay == m_pref->vdisplay &&
    5492             :                     highest_refresh < current_refresh) {
    5493           0 :                         highest_refresh = current_refresh;
    5494           0 :                         m_pref = m;
    5495             :                 }
    5496             :         }
    5497             : 
    5498           0 :         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
    5499           0 :         return m_pref;
    5500             : }
    5501             : 
    5502           0 : static bool is_freesync_video_mode(const struct drm_display_mode *mode,
    5503             :                 struct amdgpu_dm_connector *aconnector)
    5504             : {
    5505             :         struct drm_display_mode *high_mode;
    5506             :         int timing_diff;
    5507             : 
    5508           0 :         high_mode = get_highest_refresh_rate_mode(aconnector, false);
    5509           0 :         if (!high_mode || !mode)
    5510             :                 return false;
    5511             : 
    5512           0 :         timing_diff = high_mode->vtotal - mode->vtotal;
    5513             : 
    5514           0 :         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
    5515           0 :             high_mode->hdisplay != mode->hdisplay ||
    5516           0 :             high_mode->vdisplay != mode->vdisplay ||
    5517           0 :             high_mode->hsync_start != mode->hsync_start ||
    5518             :             high_mode->hsync_end != mode->hsync_end ||
    5519           0 :             high_mode->htotal != mode->htotal ||
    5520           0 :             high_mode->hskew != mode->hskew ||
    5521           0 :             high_mode->vscan != mode->vscan ||
    5522           0 :             high_mode->vsync_start - mode->vsync_start != timing_diff ||
    5523           0 :             high_mode->vsync_end - mode->vsync_end != timing_diff)
    5524             :                 return false;
    5525             :         else
    5526           0 :                 return true;
    5527             : }
    5528             : 
    5529             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    5530           0 : static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
    5531             :                             struct dc_sink *sink, struct dc_stream_state *stream,
    5532             :                             struct dsc_dec_dpcd_caps *dsc_caps)
    5533             : {
    5534           0 :         stream->timing.flags.DSC = 0;
    5535           0 :         dsc_caps->is_dsc_supported = false;
    5536             : 
    5537           0 :         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
    5538             :             sink->sink_signal == SIGNAL_TYPE_EDP)) {
    5539           0 :                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
    5540             :                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
    5541           0 :                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
    5542           0 :                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
    5543           0 :                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
    5544             :                                 dsc_caps);
    5545             :         }
    5546           0 : }
    5547             : 
    5548             : 
    5549           0 : static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
    5550             :                                     struct dc_sink *sink, struct dc_stream_state *stream,
    5551             :                                     struct dsc_dec_dpcd_caps *dsc_caps,
    5552             :                                     uint32_t max_dsc_target_bpp_limit_override)
    5553             : {
    5554           0 :         const struct dc_link_settings *verified_link_cap = NULL;
    5555             :         uint32_t link_bw_in_kbps;
    5556             :         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
    5557           0 :         struct dc *dc = sink->ctx->dc;
    5558           0 :         struct dc_dsc_bw_range bw_range = {0};
    5559           0 :         struct dc_dsc_config dsc_cfg = {0};
    5560             : 
    5561           0 :         verified_link_cap = dc_link_get_link_cap(stream->link);
    5562           0 :         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
    5563           0 :         edp_min_bpp_x16 = 8 * 16;
    5564           0 :         edp_max_bpp_x16 = 8 * 16;
    5565             : 
    5566           0 :         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
    5567           0 :                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
    5568             : 
    5569           0 :         if (edp_max_bpp_x16 < edp_min_bpp_x16)
    5570           0 :                 edp_min_bpp_x16 = edp_max_bpp_x16;
    5571             : 
    5572           0 :         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
    5573           0 :                                 dc->debug.dsc_min_slice_height_override,
    5574             :                                 edp_min_bpp_x16, edp_max_bpp_x16,
    5575             :                                 dsc_caps,
    5576           0 :                                 &stream->timing,
    5577             :                                 &bw_range)) {
    5578             : 
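                      :                 /*
                      :                  * If the stream at the maximum target bpp already fits within
                      :                  * the link bandwidth, force DSC to that maximum bpp; otherwise
                      :                  * fall through and compute a config constrained by the link
                      :                  * bandwidth below.
                      :                  */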
    5579           0 :                 if (bw_range.max_kbps < link_bw_in_kbps) {
    5580           0 :                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
    5581             :                                         dsc_caps,
    5582           0 :                                         dc->debug.dsc_min_slice_height_override,
    5583             :                                         max_dsc_target_bpp_limit_override,
    5584             :                                         0,
    5585             :                                         &stream->timing,
    5586             :                                         &dsc_cfg)) {
    5587           0 :                                 stream->timing.dsc_cfg = dsc_cfg;
    5588           0 :                                 stream->timing.flags.DSC = 1;
    5589           0 :                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
    5590             :                         }
    5591           0 :                         return;
    5592             :                 }
    5593             :         }
    5594             : 
    5595           0 :         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
    5596             :                                 dsc_caps,
    5597           0 :                                 dc->debug.dsc_min_slice_height_override,
    5598             :                                 max_dsc_target_bpp_limit_override,
    5599             :                                 link_bw_in_kbps,
    5600             :                                 &stream->timing,
    5601             :                                 &dsc_cfg)) {
    5602           0 :                 stream->timing.dsc_cfg = dsc_cfg;
    5603           0 :                 stream->timing.flags.DSC = 1;
    5604             :         }
    5605             : }
    5606             : 
    5607             : 
    5608           0 : static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
    5609             :                                         struct dc_sink *sink, struct dc_stream_state *stream,
    5610             :                                         struct dsc_dec_dpcd_caps *dsc_caps)
    5611             : {
    5612           0 :         struct drm_connector *drm_connector = &aconnector->base;
    5613             :         uint32_t link_bandwidth_kbps;
    5614           0 :         uint32_t max_dsc_target_bpp_limit_override = 0;
    5615           0 :         struct dc *dc = sink->ctx->dc;
    5616             :         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
    5617             :         uint32_t dsc_max_supported_bw_in_kbps;
    5618             : 
    5619           0 :         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
    5620           0 :                                                         dc_link_get_link_cap(aconnector->dc_link));
    5621           0 :         if (stream->link && stream->link->local_sink)
    5622           0 :                 max_dsc_target_bpp_limit_override =
    5623             :                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
    5624             : 
    5625             :         /* Set DSC policy according to dsc_clock_en */
    5626           0 :         dc_dsc_policy_set_enable_dsc_when_not_needed(
    5627           0 :                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
    5628             : 
    5629           0 :         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
    5630           0 :             !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
    5631           0 :             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
    5632             : 
    5633           0 :                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
    5634             : 
    5635           0 :         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
    5636           0 :                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
    5637           0 :                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
    5638             :                                                 dsc_caps,
    5639           0 :                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
    5640             :                                                 max_dsc_target_bpp_limit_override,
    5641             :                                                 link_bandwidth_kbps,
    5642           0 :                                                 &stream->timing,
    5643             :                                                 &stream->timing.dsc_cfg)) {
    5644           0 :                                 stream->timing.flags.DSC = 1;
    5645           0 :                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
    5646             :                         }
    5647           0 :                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
    5648           0 :                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
    5649           0 :                         max_supported_bw_in_kbps = link_bandwidth_kbps;
    5650           0 :                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
    5651             : 
    5652           0 :                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
    5653           0 :                                         max_supported_bw_in_kbps > 0 &&
    5654             :                                         dsc_max_supported_bw_in_kbps > 0)
    5655           0 :                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
    5656             :                                                 dsc_caps,
    5657           0 :                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
    5658             :                                                 max_dsc_target_bpp_limit_override,
    5659             :                                                 dsc_max_supported_bw_in_kbps,
    5660             :                                                 &stream->timing,
    5661             :                                                 &stream->timing.dsc_cfg)) {
    5662           0 :                                         stream->timing.flags.DSC = 1;
    5663           0 :                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
    5664             :                                                                          __func__, drm_connector->name);
    5665             :                                 }
    5666             :                 }
    5667             :         }
    5668             : 
    5669             :         /* Overwrite the stream flag if DSC is enabled through debugfs */
    5670           0 :         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
    5671           0 :                 stream->timing.flags.DSC = 1;
    5672             : 
    5673           0 :         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
    5674           0 :                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
    5675             : 
    5676           0 :         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
    5677           0 :                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
    5678             : 
    5679           0 :         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
    5680           0 :                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
    5681           0 : }
    5682             : #endif /* CONFIG_DRM_AMD_DC_DCN */
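/*
 * Editor's note (hedged userspace sketch, not driver code): the dsc_settings
 * overrides consumed at the end of apply_dsc_policy_for_stream() are normally
 * populated from amdgpu's per-connector debugfs entries. The path used below
 * is an example for one particular card/connector and may differ per system.
 */
#include <stdio.h>

static int force_dsc_on(const char *path)
{
        /* e.g. force_dsc_on("/sys/kernel/debug/dri/0/DP-1/dsc_clock_en") */
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs("1\n", f);        /* maps to DSC_CLK_FORCE_ENABLE for this connector */
        fclose(f);
        return 0;
}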
    5683             : 
    5684             : static struct dc_stream_state *
    5685           0 : create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
    5686             :                        const struct drm_display_mode *drm_mode,
    5687             :                        const struct dm_connector_state *dm_state,
    5688             :                        const struct dc_stream_state *old_stream,
    5689             :                        int requested_bpc)
    5690             : {
    5691           0 :         struct drm_display_mode *preferred_mode = NULL;
    5692             :         struct drm_connector *drm_connector;
    5693           0 :         const struct drm_connector_state *con_state =
    5694             :                 dm_state ? &dm_state->base : NULL;
    5695           0 :         struct dc_stream_state *stream = NULL;
    5696           0 :         struct drm_display_mode mode = *drm_mode;
    5697             :         struct drm_display_mode saved_mode;
    5698           0 :         struct drm_display_mode *freesync_mode = NULL;
    5699           0 :         bool native_mode_found = false;
    5700           0 :         bool recalculate_timing = false;
    5701           0 :         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
    5702             :         int mode_refresh;
    5703           0 :         int preferred_refresh = 0;
    5704             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    5705             :         struct dsc_dec_dpcd_caps dsc_caps;
    5706             : #endif
    5707             : 
    5708           0 :         struct dc_sink *sink = NULL;
    5709             : 
    5710           0 :         memset(&saved_mode, 0, sizeof(saved_mode));
    5711             : 
    5712           0 :         if (aconnector == NULL) {
    5713           0 :                 DRM_ERROR("aconnector is NULL!\n");
    5714           0 :                 return stream;
    5715             :         }
    5716             : 
    5717           0 :         drm_connector = &aconnector->base;
    5718             : 
    5719           0 :         if (!aconnector->dc_sink) {
    5720           0 :                 sink = create_fake_sink(aconnector);
    5721           0 :                 if (!sink)
    5722             :                         return stream;
    5723             :         } else {
    5724           0 :                 sink = aconnector->dc_sink;
    5725           0 :                 dc_sink_retain(sink);
    5726             :         }
    5727             : 
    5728           0 :         stream = dc_create_stream_for_sink(sink);
    5729             : 
    5730           0 :         if (stream == NULL) {
    5731           0 :                 DRM_ERROR("Failed to create stream for sink!\n");
    5732           0 :                 goto finish;
    5733             :         }
    5734             : 
    5735           0 :         stream->dm_stream_context = aconnector;
    5736             : 
    5737           0 :         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
    5738           0 :                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
    5739             : 
    5740           0 :         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
    5741             :                 /* Search for preferred mode */
    5742           0 :                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
    5743             :                         native_mode_found = true;
    5744             :                         break;
    5745             :                 }
    5746             :         }
    5747           0 :         if (!native_mode_found)
    5748           0 :                 preferred_mode = list_first_entry_or_null(
    5749             :                                 &aconnector->base.modes,
    5750             :                                 struct drm_display_mode,
    5751             :                                 head);
    5752             : 
    5753           0 :         mode_refresh = drm_mode_vrefresh(&mode);
    5754             : 
    5755           0 :         if (preferred_mode == NULL) {
    5756             :                 /*
     5757             :                  * This may not be an error: the use case is when we have no
     5758             :                  * usermode calls to reset and set the mode upon hotplug. In
     5759             :                  * that case we call set mode ourselves to restore the previous
     5760             :                  * mode, and the mode list may not be filled in yet.
    5761             :                  */
    5762           0 :                 DRM_DEBUG_DRIVER("No preferred mode found\n");
    5763             :         } else {
    5764           0 :                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
    5765           0 :                 if (recalculate_timing) {
    5766           0 :                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
    5767           0 :                         drm_mode_copy(&saved_mode, &mode);
    5768           0 :                         drm_mode_copy(&mode, freesync_mode);
    5769             :                 } else {
    5770           0 :                         decide_crtc_timing_for_drm_display_mode(
    5771             :                                         &mode, preferred_mode, scale);
    5772             : 
    5773           0 :                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
    5774             :                 }
    5775             :         }
    5776             : 
    5777           0 :         if (recalculate_timing)
    5778           0 :                 drm_mode_set_crtcinfo(&saved_mode, 0);
    5779           0 :         else if (!dm_state)
    5780           0 :                 drm_mode_set_crtcinfo(&mode, 0);
    5781             : 
     5782             :         /*
     5783             :          * If scaling is enabled and the refresh rate didn't change,
     5784             :          * we copy the VIC and polarities of the old timings.
     5785             :          */
    5786           0 :         if (!scale || mode_refresh != preferred_refresh)
    5787           0 :                 fill_stream_properties_from_drm_display_mode(
    5788             :                         stream, &mode, &aconnector->base, con_state, NULL,
    5789             :                         requested_bpc);
    5790             :         else
    5791           0 :                 fill_stream_properties_from_drm_display_mode(
    5792             :                         stream, &mode, &aconnector->base, con_state, old_stream,
    5793             :                         requested_bpc);
    5794             : 
    5795             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    5796             :         /* SST DSC determination policy */
    5797           0 :         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
    5798           0 :         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
    5799           0 :                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
    5800             : #endif
    5801             : 
    5802           0 :         update_stream_scaling_settings(&mode, dm_state, stream);
    5803             : 
    5804           0 :         fill_audio_info(
    5805             :                 &stream->audio_info,
    5806             :                 drm_connector,
    5807             :                 sink);
    5808             : 
    5809           0 :         update_stream_signal(stream, sink);
    5810             : 
    5811           0 :         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
    5812           0 :                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
    5813             : 
    5814           0 :         if (stream->link->psr_settings.psr_feature_enabled) {
     5815             :                 /*
     5816             :                  * Decide whether the stream supports VSC SDP colorimetry
     5817             :                  * before building the VSC info packet.
     5818             :                  */
    5819           0 :                 stream->use_vsc_sdp_for_colorimetry = false;
    5820           0 :                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
    5821           0 :                         stream->use_vsc_sdp_for_colorimetry =
    5822           0 :                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
    5823             :                 } else {
    5824           0 :                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
    5825           0 :                                 stream->use_vsc_sdp_for_colorimetry = true;
    5826             :                 }
    5827           0 :                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
    5828           0 :                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
    5829             : 
    5830             :         }
    5831             : finish:
    5832           0 :         dc_sink_release(sink);
    5833             : 
    5834           0 :         return stream;
    5835             : }
    5836             : 
    5837             : static enum drm_connector_status
    5838           0 : amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
    5839             : {
    5840             :         bool connected;
    5841           0 :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    5842             : 
    5843             :         /*
    5844             :          * Notes:
    5845             :          * 1. This interface is NOT called in context of HPD irq.
     5846             :          * 2. This interface *is called* in the context of a user-mode ioctl,
     5847             :          * which makes it a bad place for *any* MST-related activity.
    5848             :          */
    5849             : 
    5850           0 :         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
    5851           0 :             !aconnector->fake_enable)
    5852           0 :                 connected = (aconnector->dc_sink != NULL);
    5853             :         else
    5854           0 :                 connected = (aconnector->base.force == DRM_FORCE_ON ||
    5855             :                                 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
    5856             : 
    5857           0 :         update_subconnector_property(aconnector);
    5858             : 
    5859           0 :         return (connected ? connector_status_connected :
    5860             :                         connector_status_disconnected);
    5861             : }
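/*
 * Editor's note (illustrative sketch, not driver code): the base.force values
 * tested in amdgpu_dm_connector_detect() come from the generic DRM force
 * interface, e.g. when userspace writes "on", "on-digital", "off" or "detect"
 * to the connector's sysfs status file. The sysfs path below is an example
 * and depends on the card and connector name.
 */
#include <stdio.h>

static int force_connector_status(const char *status_path, const char *state)
{
        /* e.g. force_connector_status("/sys/class/drm/card0-DP-1/status", "on") */
        FILE *f = fopen(status_path, "w");

        if (!f)
                return -1;
        fputs(state, f);
        fclose(f);
        return 0;
}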
    5862             : 
    5863           0 : int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
    5864             :                                             struct drm_connector_state *connector_state,
    5865             :                                             struct drm_property *property,
    5866             :                                             uint64_t val)
    5867             : {
    5868           0 :         struct drm_device *dev = connector->dev;
    5869           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    5870           0 :         struct dm_connector_state *dm_old_state =
    5871           0 :                 to_dm_connector_state(connector->state);
    5872           0 :         struct dm_connector_state *dm_new_state =
    5873           0 :                 to_dm_connector_state(connector_state);
    5874             : 
    5875           0 :         int ret = -EINVAL;
    5876             : 
    5877           0 :         if (property == dev->mode_config.scaling_mode_property) {
    5878             :                 enum amdgpu_rmx_type rmx_type;
    5879             : 
    5880             :                 switch (val) {
    5881             :                 case DRM_MODE_SCALE_CENTER:
    5882             :                         rmx_type = RMX_CENTER;
    5883             :                         break;
    5884             :                 case DRM_MODE_SCALE_ASPECT:
    5885             :                         rmx_type = RMX_ASPECT;
    5886             :                         break;
    5887             :                 case DRM_MODE_SCALE_FULLSCREEN:
    5888             :                         rmx_type = RMX_FULL;
    5889             :                         break;
    5890             :                 case DRM_MODE_SCALE_NONE:
    5891             :                 default:
    5892             :                         rmx_type = RMX_OFF;
    5893             :                         break;
    5894             :                 }
    5895             : 
    5896           0 :                 if (dm_old_state->scaling == rmx_type)
    5897             :                         return 0;
    5898             : 
    5899           0 :                 dm_new_state->scaling = rmx_type;
    5900           0 :                 ret = 0;
    5901           0 :         } else if (property == adev->mode_info.underscan_hborder_property) {
    5902           0 :                 dm_new_state->underscan_hborder = val;
    5903           0 :                 ret = 0;
    5904           0 :         } else if (property == adev->mode_info.underscan_vborder_property) {
    5905           0 :                 dm_new_state->underscan_vborder = val;
    5906           0 :                 ret = 0;
    5907           0 :         } else if (property == adev->mode_info.underscan_property) {
    5908           0 :                 dm_new_state->underscan_enable = val;
    5909           0 :                 ret = 0;
    5910           0 :         } else if (property == adev->mode_info.abm_level_property) {
    5911           0 :                 dm_new_state->abm_level = val;
    5912           0 :                 ret = 0;
    5913             :         }
    5914             : 
    5915             :         return ret;
    5916             : }
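/*
 * Editor's note (hedged userspace sketch): the atomic_set_property hook above
 * backs standard connector properties, so a legacy libdrm client can drive
 * them with drmModeObjectSetProperty(). The helper below looks a property up
 * by name (e.g. "scaling mode") and sets it; error handling is minimal and
 * the property names are the generic DRM ones, not amdgpu-specific.
 */
#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_connector_prop(int fd, uint32_t connector_id,
                              const char *name, uint64_t value)
{
        drmModeObjectProperties *props;
        uint32_t i;
        int ret = -1;

        props = drmModeObjectGetProperties(fd, connector_id,
                                           DRM_MODE_OBJECT_CONNECTOR);
        if (!props)
                return -1;

        for (i = 0; i < props->count_props; i++) {
                drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

                if (prop && !strcmp(prop->name, name))
                        ret = drmModeObjectSetProperty(fd, connector_id,
                                                       DRM_MODE_OBJECT_CONNECTOR,
                                                       prop->prop_id, value);
                drmModeFreeProperty(prop);
        }
        drmModeFreeObjectProperties(props);
        return ret;
}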
    5917             : 
    5918           0 : int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
    5919             :                                             const struct drm_connector_state *state,
    5920             :                                             struct drm_property *property,
    5921             :                                             uint64_t *val)
    5922             : {
    5923           0 :         struct drm_device *dev = connector->dev;
    5924           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    5925           0 :         struct dm_connector_state *dm_state =
    5926           0 :                 to_dm_connector_state(state);
    5927           0 :         int ret = -EINVAL;
    5928             : 
    5929           0 :         if (property == dev->mode_config.scaling_mode_property) {
    5930           0 :                 switch (dm_state->scaling) {
    5931             :                 case RMX_CENTER:
    5932           0 :                         *val = DRM_MODE_SCALE_CENTER;
    5933           0 :                         break;
    5934             :                 case RMX_ASPECT:
    5935           0 :                         *val = DRM_MODE_SCALE_ASPECT;
    5936           0 :                         break;
    5937             :                 case RMX_FULL:
    5938           0 :                         *val = DRM_MODE_SCALE_FULLSCREEN;
    5939           0 :                         break;
    5940             :                 case RMX_OFF:
    5941             :                 default:
    5942           0 :                         *val = DRM_MODE_SCALE_NONE;
    5943           0 :                         break;
    5944             :                 }
    5945             :                 ret = 0;
    5946           0 :         } else if (property == adev->mode_info.underscan_hborder_property) {
    5947           0 :                 *val = dm_state->underscan_hborder;
    5948           0 :                 ret = 0;
    5949           0 :         } else if (property == adev->mode_info.underscan_vborder_property) {
    5950           0 :                 *val = dm_state->underscan_vborder;
    5951           0 :                 ret = 0;
    5952           0 :         } else if (property == adev->mode_info.underscan_property) {
    5953           0 :                 *val = dm_state->underscan_enable;
    5954           0 :                 ret = 0;
    5955           0 :         } else if (property == adev->mode_info.abm_level_property) {
    5956           0 :                 *val = dm_state->abm_level;
    5957           0 :                 ret = 0;
    5958             :         }
    5959             : 
    5960           0 :         return ret;
    5961             : }
    5962             : 
    5963           0 : static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
    5964             : {
    5965           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
    5966             : 
    5967           0 :         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
    5968           0 : }
    5969             : 
    5970           0 : static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
    5971             : {
    5972           0 :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    5973           0 :         const struct dc_link *link = aconnector->dc_link;
    5974           0 :         struct amdgpu_device *adev = drm_to_adev(connector->dev);
    5975           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    5976             :         int i;
    5977             : 
    5978             :         /*
    5979             :          * Call only if mst_mgr was initialized before since it's not done
    5980             :          * for all connector types.
    5981             :          */
    5982           0 :         if (aconnector->mst_mgr.dev)
    5983           0 :                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
    5984             : 
    5985             : #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
    5986             :         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
    5987           0 :         for (i = 0; i < dm->num_of_edps; i++) {
    5988           0 :                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
    5989           0 :                         backlight_device_unregister(dm->backlight_dev[i]);
    5990           0 :                         dm->backlight_dev[i] = NULL;
    5991             :                 }
    5992             :         }
    5993             : #endif
    5994             : 
    5995           0 :         if (aconnector->dc_em_sink)
    5996           0 :                 dc_sink_release(aconnector->dc_em_sink);
    5997           0 :         aconnector->dc_em_sink = NULL;
    5998           0 :         if (aconnector->dc_sink)
    5999           0 :                 dc_sink_release(aconnector->dc_sink);
    6000           0 :         aconnector->dc_sink = NULL;
    6001             : 
    6002           0 :         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
    6003           0 :         drm_connector_unregister(connector);
    6004           0 :         drm_connector_cleanup(connector);
    6005           0 :         if (aconnector->i2c) {
    6006           0 :                 i2c_del_adapter(&aconnector->i2c->base);
    6007           0 :                 kfree(aconnector->i2c);
    6008             :         }
    6009           0 :         kfree(aconnector->dm_dp_aux.aux.name);
    6010             : 
    6011           0 :         kfree(connector);
    6012           0 : }
    6013             : 
    6014           0 : void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
    6015             : {
    6016           0 :         struct dm_connector_state *state =
    6017           0 :                 to_dm_connector_state(connector->state);
    6018             : 
    6019           0 :         if (connector->state)
    6020           0 :                 __drm_atomic_helper_connector_destroy_state(connector->state);
    6021             : 
    6022           0 :         kfree(state);
    6023             : 
    6024           0 :         state = kzalloc(sizeof(*state), GFP_KERNEL);
    6025             : 
    6026           0 :         if (state) {
    6027           0 :                 state->scaling = RMX_OFF;
    6028           0 :                 state->underscan_enable = false;
    6029           0 :                 state->underscan_hborder = 0;
    6030           0 :                 state->underscan_vborder = 0;
    6031           0 :                 state->base.max_requested_bpc = 8;
    6032           0 :                 state->vcpi_slots = 0;
    6033           0 :                 state->pbn = 0;
    6034             : 
    6035           0 :                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
    6036           0 :                         state->abm_level = amdgpu_dm_abm_level;
    6037             : 
    6038           0 :                 __drm_atomic_helper_connector_reset(connector, &state->base);
    6039             :         }
    6040           0 : }
    6041             : 
    6042             : struct drm_connector_state *
    6043           0 : amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
    6044             : {
    6045           0 :         struct dm_connector_state *state =
    6046           0 :                 to_dm_connector_state(connector->state);
    6047             : 
    6048           0 :         struct dm_connector_state *new_state =
    6049             :                         kmemdup(state, sizeof(*state), GFP_KERNEL);
    6050             : 
    6051           0 :         if (!new_state)
    6052             :                 return NULL;
    6053             : 
    6054           0 :         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
    6055             : 
    6056           0 :         new_state->freesync_capable = state->freesync_capable;
    6057           0 :         new_state->abm_level = state->abm_level;
    6058           0 :         new_state->scaling = state->scaling;
    6059           0 :         new_state->underscan_enable = state->underscan_enable;
    6060           0 :         new_state->underscan_hborder = state->underscan_hborder;
    6061           0 :         new_state->underscan_vborder = state->underscan_vborder;
    6062           0 :         new_state->vcpi_slots = state->vcpi_slots;
    6063           0 :         new_state->pbn = state->pbn;
    6064           0 :         return &new_state->base;
    6065             : }
    6066             : 
    6067             : static int
    6068           0 : amdgpu_dm_connector_late_register(struct drm_connector *connector)
    6069             : {
    6070           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector =
    6071           0 :                 to_amdgpu_dm_connector(connector);
    6072             :         int r;
    6073             : 
    6074           0 :         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
    6075             :             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
    6076           0 :                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
    6077           0 :                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
    6078           0 :                 if (r)
    6079             :                         return r;
    6080             :         }
    6081             : 
    6082             : #if defined(CONFIG_DEBUG_FS)
    6083             :         connector_debugfs_init(amdgpu_dm_connector);
    6084             : #endif
    6085             : 
    6086             :         return 0;
    6087             : }
    6088             : 
    6089             : static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
    6090             :         .reset = amdgpu_dm_connector_funcs_reset,
    6091             :         .detect = amdgpu_dm_connector_detect,
    6092             :         .fill_modes = drm_helper_probe_single_connector_modes,
    6093             :         .destroy = amdgpu_dm_connector_destroy,
    6094             :         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
    6095             :         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
    6096             :         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
    6097             :         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
    6098             :         .late_register = amdgpu_dm_connector_late_register,
    6099             :         .early_unregister = amdgpu_dm_connector_unregister
    6100             : };
    6101             : 
    6102           0 : static int get_modes(struct drm_connector *connector)
    6103             : {
    6104           0 :         return amdgpu_dm_connector_get_modes(connector);
    6105             : }
    6106             : 
    6107           0 : static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
    6108             : {
    6109           0 :         struct dc_sink_init_data init_params = {
    6110           0 :                         .link = aconnector->dc_link,
    6111             :                         .sink_signal = SIGNAL_TYPE_VIRTUAL
    6112             :         };
    6113             :         struct edid *edid;
    6114             : 
    6115           0 :         if (!aconnector->base.edid_blob_ptr) {
     6116           0 :                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
    6117             :                                 aconnector->base.name);
    6118             : 
    6119           0 :                 aconnector->base.force = DRM_FORCE_OFF;
    6120           0 :                 aconnector->base.override_edid = false;
    6121           0 :                 return;
    6122             :         }
    6123             : 
    6124           0 :         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
    6125             : 
    6126           0 :         aconnector->edid = edid;
    6127             : 
    6128           0 :         aconnector->dc_em_sink = dc_link_add_remote_sink(
    6129             :                 aconnector->dc_link,
    6130             :                 (uint8_t *)edid,
    6131           0 :                 (edid->extensions + 1) * EDID_LENGTH,
    6132             :                 &init_params);
    6133             : 
    6134           0 :         if (aconnector->base.force == DRM_FORCE_ON) {
    6135           0 :                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
    6136           0 :                 aconnector->dc_link->local_sink :
    6137             :                 aconnector->dc_em_sink;
    6138           0 :                 dc_sink_retain(aconnector->dc_sink);
    6139             :         }
    6140             : }
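/*
 * Editor's note (hedged example, not driver code): the edid_blob_ptr consumed
 * by create_eml_sink() is typically populated when userspace overrides the
 * EDID, e.g. through the connector's debugfs "edid_override" file or the
 * drm.edid_firmware= module parameter. The debugfs path below is an example
 * for one particular card/connector.
 */
#include <stdio.h>

static int override_edid(const char *debugfs_path, const char *edid_bin)
{
        /* e.g. override_edid("/sys/kernel/debug/dri/0/DP-1/edid_override", "edid.bin") */
        FILE *in = fopen(edid_bin, "rb");
        FILE *out = in ? fopen(debugfs_path, "wb") : NULL;
        char buf[256];
        size_t n;

        if (!in || !out) {
                if (in)
                        fclose(in);
                return -1;
        }
        while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
                fwrite(buf, 1, n, out);
        fclose(in);
        fclose(out);
        return 0;
}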
    6141             : 
    6142             : static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
    6143             : {
    6144           0 :         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
    6145             : 
     6146             :         /*
     6147             :          * In case of a headless boot with force-on for a DP-managed connector,
     6148             :          * those settings have to be != 0 to get an initial modeset.
     6149             :          */
    6150           0 :         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
    6151           0 :                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
    6152           0 :                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
    6153             :         }
    6154             : 
    6155             : 
    6156           0 :         aconnector->base.override_edid = true;
    6157           0 :         create_eml_sink(aconnector);
    6158             : }
    6159             : 
    6160             : struct dc_stream_state *
    6161           0 : create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
    6162             :                                 const struct drm_display_mode *drm_mode,
    6163             :                                 const struct dm_connector_state *dm_state,
    6164             :                                 const struct dc_stream_state *old_stream)
    6165             : {
    6166           0 :         struct drm_connector *connector = &aconnector->base;
    6167           0 :         struct amdgpu_device *adev = drm_to_adev(connector->dev);
    6168             :         struct dc_stream_state *stream;
    6169           0 :         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
    6170           0 :         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
    6171             :         enum dc_status dc_result = DC_OK;
    6172             : 
    6173             :         do {
    6174           0 :                 stream = create_stream_for_sink(aconnector, drm_mode,
    6175             :                                                 dm_state, old_stream,
    6176             :                                                 requested_bpc);
    6177           0 :                 if (stream == NULL) {
    6178           0 :                         DRM_ERROR("Failed to create stream for sink!\n");
    6179           0 :                         break;
    6180             :                 }
    6181             : 
    6182           0 :                 dc_result = dc_validate_stream(adev->dm.dc, stream);
    6183           0 :                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
    6184           0 :                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
    6185             : 
    6186           0 :                 if (dc_result != DC_OK) {
    6187           0 :                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
    6188             :                                       drm_mode->hdisplay,
    6189             :                                       drm_mode->vdisplay,
    6190             :                                       drm_mode->clock,
    6191             :                                       dc_result,
    6192             :                                       dc_status_to_str(dc_result));
    6193             : 
    6194           0 :                         dc_stream_release(stream);
    6195           0 :                         stream = NULL;
    6196           0 :                         requested_bpc -= 2; /* lower bpc to retry validation */
    6197             :                 }
    6198             : 
    6199           0 :         } while (stream == NULL && requested_bpc >= 6);
    6200             : 
    6201           0 :         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
    6202           0 :                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
    6203             : 
    6204           0 :                 aconnector->force_yuv420_output = true;
    6205           0 :                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
    6206             :                                                 dm_state, old_stream);
    6207           0 :                 aconnector->force_yuv420_output = false;
    6208             :         }
    6209             : 
    6210           0 :         return stream;
    6211             : }
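/*
 * Editor's note (minimal sketch of the retry policy above, not driver code):
 * validation starts at the connector's max_requested_bpc and walks down in
 * steps of 2 until 6 bpc; if the encoder still rejects the mode, one extra
 * attempt is made with YCbCr 4:2:0 forced. The callback stands in for the
 * dc_validate_stream() path and is hypothetical.
 */
#include <stdbool.h>

static int pick_bpc(int max_requested_bpc, bool (*try_validate)(int bpc))
{
        int bpc;

        for (bpc = max_requested_bpc; bpc >= 6; bpc -= 2)
                if (try_validate(bpc))          /* e.g. 10 -> 8 -> 6 */
                        return bpc;
        return -1;      /* caller may then retry with YCbCr 4:2:0, as above */
}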
    6212             : 
    6213           0 : enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
    6214             :                                    struct drm_display_mode *mode)
    6215             : {
    6216           0 :         int result = MODE_ERROR;
    6217             :         struct dc_sink *dc_sink;
    6218             :         /* TODO: Unhardcode stream count */
    6219             :         struct dc_stream_state *stream;
    6220           0 :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    6221             : 
    6222           0 :         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
    6223             :                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
    6224             :                 return result;
    6225             : 
    6226             :         /*
     6227             :          * Only run this the first time mode_valid is called, to initialize
     6228             :          * EDID management.
    6229             :          */
    6230           0 :         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
    6231           0 :                 !aconnector->dc_em_sink)
    6232             :                 handle_edid_mgmt(aconnector);
    6233             : 
    6234           0 :         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
    6235             : 
    6236           0 :         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
    6237             :                                 aconnector->base.force != DRM_FORCE_ON) {
    6238           0 :                 DRM_ERROR("dc_sink is NULL!\n");
    6239           0 :                 goto fail;
    6240             :         }
    6241             : 
    6242           0 :         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
    6243           0 :         if (stream) {
    6244           0 :                 dc_stream_release(stream);
    6245           0 :                 result = MODE_OK;
    6246             :         }
    6247             : 
    6248             : fail:
     6249             :         /* TODO: error handling */
    6250             :         return result;
    6251             : }
    6252             : 
    6253           0 : static int fill_hdr_info_packet(const struct drm_connector_state *state,
    6254             :                                 struct dc_info_packet *out)
    6255             : {
    6256             :         struct hdmi_drm_infoframe frame;
    6257             :         unsigned char buf[30]; /* 26 + 4 */
    6258             :         ssize_t len;
    6259             :         int ret, i;
    6260             : 
    6261           0 :         memset(out, 0, sizeof(*out));
    6262             : 
    6263           0 :         if (!state->hdr_output_metadata)
    6264             :                 return 0;
    6265             : 
    6266           0 :         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
    6267           0 :         if (ret)
    6268             :                 return ret;
    6269             : 
    6270           0 :         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
    6271           0 :         if (len < 0)
    6272           0 :                 return (int)len;
    6273             : 
    6274             :         /* Static metadata is a fixed 26 bytes + 4 byte header. */
    6275           0 :         if (len != 30)
    6276             :                 return -EINVAL;
    6277             : 
    6278             :         /* Prepare the infopacket for DC. */
    6279           0 :         switch (state->connector->connector_type) {
    6280             :         case DRM_MODE_CONNECTOR_HDMIA:
    6281           0 :                 out->hb0 = 0x87; /* type */
    6282           0 :                 out->hb1 = 0x01; /* version */
    6283           0 :                 out->hb2 = 0x1A; /* length */
    6284           0 :                 out->sb[0] = buf[3]; /* checksum */
    6285           0 :                 i = 1;
    6286           0 :                 break;
    6287             : 
    6288             :         case DRM_MODE_CONNECTOR_DisplayPort:
    6289             :         case DRM_MODE_CONNECTOR_eDP:
    6290           0 :                 out->hb0 = 0x00; /* sdp id, zero */
    6291           0 :                 out->hb1 = 0x87; /* type */
    6292           0 :                 out->hb2 = 0x1D; /* payload len - 1 */
    6293           0 :                 out->hb3 = (0x13 << 2); /* sdp version */
    6294           0 :                 out->sb[0] = 0x01; /* version */
    6295           0 :                 out->sb[1] = 0x1A; /* length */
    6296           0 :                 i = 2;
    6297           0 :                 break;
    6298             : 
    6299             :         default:
    6300             :                 return -EINVAL;
    6301             :         }
    6302             : 
    6303           0 :         memcpy(&out->sb[i], &buf[4], 26);
    6304           0 :         out->valid = true;
    6305             : 
    6306           0 :         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
    6307             :                        sizeof(out->sb), false);
    6308             : 
    6309           0 :         return 0;
    6310             : }
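/*
 * Editor's note (hedged userspace sketch): the info packet built above is
 * driven by the connector's HDR_OUTPUT_METADATA blob property. A client fills
 * struct hdr_output_metadata from the DRM uapi headers and attaches it as a
 * property blob; the luminance numbers below are arbitrary examples, and a
 * real compositor would set the property through the atomic API.
 */
#include <string.h>
#include <stdint.h>
#include <xf86drm.h>            /* libdrm headers pull in the DRM uapi structs */
#include <xf86drmMode.h>

static int make_hdr_blob(int fd, uint32_t *blob_id)
{
        struct hdr_output_metadata meta;

        memset(&meta, 0, sizeof(meta));
        meta.metadata_type = 0;                         /* static metadata type 1 */
        meta.hdmi_metadata_type1.eotf = 2;              /* SMPTE ST 2084 (PQ) */
        meta.hdmi_metadata_type1.metadata_type = 0;
        meta.hdmi_metadata_type1.max_display_mastering_luminance = 1000;       /* cd/m2 */
        meta.hdmi_metadata_type1.min_display_mastering_luminance = 1;          /* 0.0001 cd/m2 units */
        meta.hdmi_metadata_type1.max_cll = 1000;
        meta.hdmi_metadata_type1.max_fall = 400;

        return drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), blob_id);
}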
    6311             : 
    6312             : static int
    6313           0 : amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
    6314             :                                  struct drm_atomic_state *state)
    6315             : {
    6316           0 :         struct drm_connector_state *new_con_state =
    6317             :                 drm_atomic_get_new_connector_state(state, conn);
    6318           0 :         struct drm_connector_state *old_con_state =
    6319             :                 drm_atomic_get_old_connector_state(state, conn);
    6320           0 :         struct drm_crtc *crtc = new_con_state->crtc;
    6321             :         struct drm_crtc_state *new_crtc_state;
    6322             :         int ret;
    6323             : 
    6324             :         trace_amdgpu_dm_connector_atomic_check(new_con_state);
    6325             : 
    6326           0 :         if (!crtc)
    6327             :                 return 0;
    6328             : 
    6329           0 :         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
    6330             :                 struct dc_info_packet hdr_infopacket;
    6331             : 
    6332           0 :                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
    6333           0 :                 if (ret)
    6334           0 :                         return ret;
    6335             : 
    6336           0 :                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
    6337           0 :                 if (IS_ERR(new_crtc_state))
    6338           0 :                         return PTR_ERR(new_crtc_state);
    6339             : 
    6340             :                 /*
    6341             :                  * DC considers the stream backends changed if the
    6342             :                  * static metadata changes. Forcing the modeset also
    6343             :                  * gives a simple way for userspace to switch from
    6344             :                  * 8bpc to 10bpc when setting the metadata to enter
    6345             :                  * or exit HDR.
    6346             :                  *
    6347             :                  * Changing the static metadata after it's been
    6348             :                  * set is permissible, however. So only force a
    6349             :                  * modeset if we're entering or exiting HDR.
    6350             :                  */
    6351           0 :                 new_crtc_state->mode_changed =
    6352           0 :                         !old_con_state->hdr_output_metadata ||
    6353           0 :                         !new_con_state->hdr_output_metadata;
    6354             :         }
    6355             : 
    6356             :         return 0;
    6357             : }
    6358             : 
    6359             : static const struct drm_connector_helper_funcs
    6360             : amdgpu_dm_connector_helper_funcs = {
    6361             :         /*
     6362             :          * If a second, bigger display is hotplugged in fbcon mode, its higher
     6363             :          * resolution modes are filtered out by drm_mode_validate_size() and go
     6364             :          * missing after the user starts lightdm. So we need to rebuild the
     6365             :          * mode list in the get_modes callback, not just return the mode count.
    6366             :          */
    6367             :         .get_modes = get_modes,
    6368             :         .mode_valid = amdgpu_dm_connector_mode_valid,
    6369             :         .atomic_check = amdgpu_dm_connector_atomic_check,
    6370             : };
    6371             : 
    6372           0 : static void dm_encoder_helper_disable(struct drm_encoder *encoder)
    6373             : {
    6374             : 
    6375           0 : }
    6376             : 
    6377           0 : int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
    6378             : {
    6379             :         switch (display_color_depth) {
    6380             :         case COLOR_DEPTH_666:
    6381             :                 return 6;
    6382             :         case COLOR_DEPTH_888:
    6383             :                 return 8;
    6384             :         case COLOR_DEPTH_101010:
    6385             :                 return 10;
    6386             :         case COLOR_DEPTH_121212:
    6387             :                 return 12;
    6388             :         case COLOR_DEPTH_141414:
    6389             :                 return 14;
    6390             :         case COLOR_DEPTH_161616:
    6391             :                 return 16;
    6392             :         default:
    6393             :                 break;
    6394             :         }
    6395             :         return 0;
    6396             : }
    6397             : 
    6398           0 : static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
    6399             :                                           struct drm_crtc_state *crtc_state,
    6400             :                                           struct drm_connector_state *conn_state)
    6401             : {
    6402           0 :         struct drm_atomic_state *state = crtc_state->state;
    6403           0 :         struct drm_connector *connector = conn_state->connector;
    6404           0 :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    6405           0 :         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
    6406           0 :         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
    6407             :         struct drm_dp_mst_topology_mgr *mst_mgr;
    6408             :         struct drm_dp_mst_port *mst_port;
    6409             :         enum dc_color_depth color_depth;
    6410           0 :         int clock, bpp = 0;
    6411           0 :         bool is_y420 = false;
    6412             : 
    6413           0 :         if (!aconnector->port || !aconnector->dc_sink)
    6414             :                 return 0;
    6415             : 
    6416           0 :         mst_port = aconnector->port;
    6417           0 :         mst_mgr = &aconnector->mst_port->mst_mgr;
    6418             : 
    6419           0 :         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
    6420             :                 return 0;
    6421             : 
    6422           0 :         if (!state->duplicated) {
    6423           0 :                 int max_bpc = conn_state->max_requested_bpc;
    6424           0 :                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
    6425           0 :                           aconnector->force_yuv420_output;
    6426           0 :                 color_depth = convert_color_depth_from_display_info(connector,
    6427             :                                                                     is_y420,
    6428             :                                                                     max_bpc);
    6429           0 :                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
    6430           0 :                 clock = adjusted_mode->clock;
    6431           0 :                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
    6432             :         }
    6433           0 :         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
    6434             :                                                                            mst_mgr,
    6435             :                                                                            mst_port,
    6436           0 :                                                                            dm_new_connector_state->pbn,
    6437             :                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
    6438           0 :         if (dm_new_connector_state->vcpi_slots < 0) {
    6439           0 :                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
    6440           0 :                 return dm_new_connector_state->vcpi_slots;
    6441             :         }
    6442             :         return 0;
    6443             : }
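/*
 * Editor's note (standalone worked example of the PBN math above, not driver
 * code): drm_dp_calc_pbn_mode() applies a ~0.6% margin and expresses the peak
 * rate in 54/64 MB/s units. For 1920x1080@60 (pixel clock 148500 kHz) at
 * 8 bpc RGB (24 bpp) this gives 532 PBN; on an HBR2 x4 link, where the PBN
 * divider reported by dm_mst_get_pbn_divider() works out to 40 PBN per time
 * slot, that rounds up to 14 VC payload slots.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t clock_khz = 148500, bpp = 24;    /* 1920x1080@60, 8 bpc RGB */
        const uint64_t div = 8ULL * 54 * 1000 * 1000;
        const uint64_t pbn = (clock_khz * bpp * 64 * 1006 + div - 1) / div;
        const uint64_t pbn_per_slot = 40;               /* HBR2 x4 */

        printf("pbn=%llu slots=%llu\n", (unsigned long long)pbn,
               (unsigned long long)((pbn + pbn_per_slot - 1) / pbn_per_slot));
        return 0;       /* prints: pbn=532 slots=14 */
}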
    6444             : 
    6445             : const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
    6446             :         .disable = dm_encoder_helper_disable,
    6447             :         .atomic_check = dm_encoder_helper_atomic_check
    6448             : };
    6449             : 
    6450             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    6451           0 : static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
    6452             :                                             struct dc_state *dc_state,
    6453             :                                             struct dsc_mst_fairness_vars *vars)
    6454             : {
    6455           0 :         struct dc_stream_state *stream = NULL;
    6456             :         struct drm_connector *connector;
    6457             :         struct drm_connector_state *new_con_state;
    6458             :         struct amdgpu_dm_connector *aconnector;
    6459             :         struct dm_connector_state *dm_conn_state;
    6460             :         int i, j;
    6461           0 :         int vcpi, pbn_div, pbn, slot_num = 0;
    6462             : 
    6463           0 :         for_each_new_connector_in_state(state, connector, new_con_state, i) {
    6464             : 
    6465           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    6466             : 
    6467           0 :                 if (!aconnector->port)
    6468           0 :                         continue;
    6469             : 
    6470           0 :                 if (!new_con_state || !new_con_state->crtc)
    6471           0 :                         continue;
    6472             : 
    6473             :                 dm_conn_state = to_dm_connector_state(new_con_state);
    6474             : 
    6475           0 :                 for (j = 0; j < dc_state->stream_count; j++) {
    6476           0 :                         stream = dc_state->streams[j];
    6477           0 :                         if (!stream)
    6478           0 :                                 continue;
    6479             : 
    6480           0 :                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
    6481             :                                 break;
    6482             : 
    6483             :                         stream = NULL;
    6484             :                 }
    6485             : 
    6486           0 :                 if (!stream)
    6487           0 :                         continue;
    6488             : 
    6489           0 :                 pbn_div = dm_mst_get_pbn_divider(stream->link);
     6490             :                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
    6491           0 :                 for (j = 0; j < dc_state->stream_count; j++) {
    6492           0 :                         if (vars[j].aconnector == aconnector) {
    6493           0 :                                 pbn = vars[j].pbn;
    6494           0 :                                 break;
    6495             :                         }
    6496             :                 }
    6497             : 
    6498           0 :                 if (j == dc_state->stream_count)
    6499           0 :                         continue;
    6500             : 
    6501           0 :                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
    6502             : 
    6503           0 :                 if (stream->timing.flags.DSC != 1) {
    6504           0 :                         dm_conn_state->pbn = pbn;
    6505           0 :                         dm_conn_state->vcpi_slots = slot_num;
    6506             : 
    6507           0 :                         drm_dp_mst_atomic_enable_dsc(state,
    6508             :                                                      aconnector->port,
    6509             :                                                      dm_conn_state->pbn,
    6510             :                                                      0,
    6511             :                                                      false);
    6512           0 :                         continue;
    6513             :                 }
    6514             : 
    6515           0 :                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
    6516             :                                                     aconnector->port,
    6517             :                                                     pbn, pbn_div,
    6518             :                                                     true);
    6519           0 :                 if (vcpi < 0)
    6520             :                         return vcpi;
    6521             : 
    6522           0 :                 dm_conn_state->pbn = pbn;
    6523           0 :                 dm_conn_state->vcpi_slots = vcpi;
    6524             :         }
    6525             :         return 0;
    6526             : }
    6527             : #endif
    6528             : 
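                     : /*
                     :  * Map a DC signal type onto the corresponding DRM connector type;
                     :  * any signal we do not recognize is reported as
                     :  * DRM_MODE_CONNECTOR_Unknown.
                     :  */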
    6529           0 : static int to_drm_connector_type(enum signal_type st)
    6530             : {
    6531           0 :         switch (st) {
    6532             :         case SIGNAL_TYPE_HDMI_TYPE_A:
    6533             :                 return DRM_MODE_CONNECTOR_HDMIA;
    6534             :         case SIGNAL_TYPE_EDP:
    6535           0 :                 return DRM_MODE_CONNECTOR_eDP;
    6536             :         case SIGNAL_TYPE_LVDS:
    6537           0 :                 return DRM_MODE_CONNECTOR_LVDS;
    6538             :         case SIGNAL_TYPE_RGB:
    6539           0 :                 return DRM_MODE_CONNECTOR_VGA;
    6540             :         case SIGNAL_TYPE_DISPLAY_PORT:
    6541             :         case SIGNAL_TYPE_DISPLAY_PORT_MST:
    6542           0 :                 return DRM_MODE_CONNECTOR_DisplayPort;
    6543             :         case SIGNAL_TYPE_DVI_DUAL_LINK:
    6544             :         case SIGNAL_TYPE_DVI_SINGLE_LINK:
    6545           0 :                 return DRM_MODE_CONNECTOR_DVID;
    6546             :         case SIGNAL_TYPE_VIRTUAL:
    6547           0 :                 return DRM_MODE_CONNECTOR_VIRTUAL;
    6548             : 
    6549             :         default:
    6550           0 :                 return DRM_MODE_CONNECTOR_Unknown;
    6551             :         }
    6552             : }
    6553             : 
    6554             : static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
    6555             : {
    6556             :         struct drm_encoder *encoder;
    6557             : 
    6558             :         /* There is only one encoder per connector */
    6559           0 :         drm_connector_for_each_possible_encoder(connector, encoder)
    6560             :                 return encoder;
    6561             : 
    6562             :         return NULL;
    6563             : }
    6564             : 
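                     : /*
                     :  * Reset the encoder's cached native mode and, if the first entry in the
                     :  * connector's probed mode list is flagged DRM_MODE_TYPE_PREFERRED,
                     :  * record that mode as the new native mode. The probed list is sorted by
                     :  * amdgpu_dm_connector_ddc_get_modes() before this is called.
                     :  */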
    6565           0 : static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
    6566             : {
    6567             :         struct drm_encoder *encoder;
    6568             :         struct amdgpu_encoder *amdgpu_encoder;
    6569             : 
    6570           0 :         encoder = amdgpu_dm_connector_to_encoder(connector);
    6571             : 
    6572           0 :         if (encoder == NULL)
    6573             :                 return;
    6574             : 
    6575           0 :         amdgpu_encoder = to_amdgpu_encoder(encoder);
    6576             : 
    6577           0 :         amdgpu_encoder->native_mode.clock = 0;
    6578             : 
    6579           0 :         if (!list_empty(&connector->probed_modes)) {
    6580           0 :                 struct drm_display_mode *preferred_mode = NULL;
    6581             : 
    6582           0 :                 list_for_each_entry(preferred_mode,
    6583             :                                     &connector->probed_modes,
    6584             :                                     head) {
    6585           0 :                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
    6586           0 :                                 amdgpu_encoder->native_mode = *preferred_mode;
    6587             : 
    6588             :                         break;
    6589             :                 }
    6590             : 
    6591             :         }
    6592             : }
    6593             : 
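                     : /*
                     :  * Duplicate the encoder's native mode, override its name and active
                     :  * width/height, and clear the DRM_MODE_TYPE_PREFERRED flag on the copy.
                     :  */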
    6594             : static struct drm_display_mode *
    6595           0 : amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
    6596             :                              char *name,
    6597             :                              int hdisplay, int vdisplay)
    6598             : {
    6599           0 :         struct drm_device *dev = encoder->dev;
    6600           0 :         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    6601           0 :         struct drm_display_mode *mode = NULL;
    6602           0 :         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
    6603             : 
    6604           0 :         mode = drm_mode_duplicate(dev, native_mode);
    6605             : 
    6606           0 :         if (mode == NULL)
    6607             :                 return NULL;
    6608             : 
    6609           0 :         mode->hdisplay = hdisplay;
    6610           0 :         mode->vdisplay = vdisplay;
    6611           0 :         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
    6612           0 :         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
    6613             : 
    6614           0 :         return mode;
    6615             : 
    6616             : }
    6617             : 
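                     : /*
                     :  * Add a list of common resolutions (640x480 through 1920x1200) that fit
                     :  * within the native mode but do not match it exactly, skipping any
                     :  * resolution already present in the connector's probed modes.
                     :  */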
    6618           0 : static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
    6619             :                                                  struct drm_connector *connector)
    6620             : {
    6621           0 :         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
    6622           0 :         struct drm_display_mode *mode = NULL;
    6623           0 :         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
    6624           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector =
    6625           0 :                                 to_amdgpu_dm_connector(connector);
    6626             :         int i;
    6627             :         int n;
    6628             :         struct mode_size {
    6629             :                 char name[DRM_DISPLAY_MODE_LEN];
    6630             :                 int w;
    6631             :                 int h;
    6632           0 :         } common_modes[] = {
    6633             :                 {  "640x480",  640,  480},
    6634             :                 {  "800x600",  800,  600},
    6635             :                 { "1024x768", 1024,  768},
    6636             :                 { "1280x720", 1280,  720},
    6637             :                 { "1280x800", 1280,  800},
    6638             :                 {"1280x1024", 1280, 1024},
    6639             :                 { "1440x900", 1440,  900},
    6640             :                 {"1680x1050", 1680, 1050},
    6641             :                 {"1600x1200", 1600, 1200},
    6642             :                 {"1920x1080", 1920, 1080},
    6643             :                 {"1920x1200", 1920, 1200}
    6644             :         };
    6645             : 
    6646           0 :         n = ARRAY_SIZE(common_modes);
    6647             : 
    6648           0 :         for (i = 0; i < n; i++) {
    6649           0 :                 struct drm_display_mode *curmode = NULL;
    6650           0 :                 bool mode_existed = false;
    6651             : 
    6652           0 :                 if (common_modes[i].w > native_mode->hdisplay ||
    6653           0 :                     common_modes[i].h > native_mode->vdisplay ||
    6654           0 :                    (common_modes[i].w == native_mode->hdisplay &&
    6655             :                     common_modes[i].h == native_mode->vdisplay))
    6656           0 :                         continue;
    6657             : 
    6658           0 :                 list_for_each_entry(curmode, &connector->probed_modes, head) {
    6659           0 :                         if (common_modes[i].w == curmode->hdisplay &&
    6660           0 :                             common_modes[i].h == curmode->vdisplay) {
    6661             :                                 mode_existed = true;
    6662             :                                 break;
    6663             :                         }
    6664             :                 }
    6665             : 
    6666           0 :                 if (mode_existed)
    6667           0 :                         continue;
    6668             : 
    6669           0 :                 mode = amdgpu_dm_create_common_mode(encoder,
    6670           0 :                                 common_modes[i].name, common_modes[i].w,
    6671             :                                 common_modes[i].h);
    6672           0 :                 if (!mode)
    6673           0 :                         continue;
    6674             : 
    6675           0 :                 drm_mode_probed_add(connector, mode);
    6676           0 :                 amdgpu_dm_connector->num_modes++;
    6677             :         }
    6678           0 : }
    6679             : 
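                     : /*
                     :  * Apply panel-orientation quirks for internal panels (eDP and LVDS
                     :  * only), keyed on the native mode's width and height.
                     :  */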
    6680           0 : static void amdgpu_set_panel_orientation(struct drm_connector *connector)
    6681             : {
    6682             :         struct drm_encoder *encoder;
    6683             :         struct amdgpu_encoder *amdgpu_encoder;
    6684             :         const struct drm_display_mode *native_mode;
    6685             : 
    6686           0 :         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
    6687             :             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
    6688             :                 return;
    6689             : 
    6690           0 :         mutex_lock(&connector->dev->mode_config.mutex);
    6691           0 :         amdgpu_dm_connector_get_modes(connector);
    6692           0 :         mutex_unlock(&connector->dev->mode_config.mutex);
    6693             : 
    6694           0 :         encoder = amdgpu_dm_connector_to_encoder(connector);
    6695           0 :         if (!encoder)
    6696             :                 return;
    6697             : 
    6698           0 :         amdgpu_encoder = to_amdgpu_encoder(encoder);
    6699             : 
    6700           0 :         native_mode = &amdgpu_encoder->native_mode;
    6701           0 :         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
    6702             :                 return;
    6703             : 
    6704           0 :         drm_connector_set_panel_orientation_with_quirk(connector,
    6705             :                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
    6706             :                                                        native_mode->hdisplay,
    6707             :                                                        native_mode->vdisplay);
    6708             : }
    6709             : 
    6710           0 : static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
    6711             :                                               struct edid *edid)
    6712             : {
    6713           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector =
    6714           0 :                         to_amdgpu_dm_connector(connector);
    6715             : 
    6716           0 :         if (edid) {
    6717             :                 /* empty probed_modes */
    6718           0 :                 INIT_LIST_HEAD(&connector->probed_modes);
    6719           0 :                 amdgpu_dm_connector->num_modes =
    6720           0 :                                 drm_add_edid_modes(connector, edid);
    6721             : 
    6722             :                 /* Sort the probed modes before calling
    6723             :                  * amdgpu_dm_get_native_mode(), since an EDID can report
    6724             :                  * more than one preferred mode, and the modes that appear
    6725             :                  * later in the probed mode list may have a higher,
    6726             :                  * preferred resolution. For example, the base EDID may
    6727             :                  * prefer 3840x2160 while a DID extension block later
    6728             :                  * prefers 4096x2160.
    6729             :                  */
    6730           0 :                 drm_mode_sort(&connector->probed_modes);
    6731           0 :                 amdgpu_dm_get_native_mode(connector);
    6732             : 
    6733             :                 /* Freesync capabilities are reset by calling
    6734             :                  * drm_add_edid_modes() and need to be
    6735             :                  * restored here.
    6736             :                  */
    6737           0 :                 amdgpu_dm_update_freesync_caps(connector, edid);
    6738             :         } else {
    6739           0 :                 amdgpu_dm_connector->num_modes = 0;
    6740             :         }
    6741           0 : }
    6742             : 
    6743           0 : static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
    6744             :                               struct drm_display_mode *mode)
    6745             : {
    6746             :         struct drm_display_mode *m;
    6747             : 
    6748           0 :         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
    6749           0 :                 if (drm_mode_equal(m, mode))
    6750             :                         return true;
    6751             :         }
    6752             : 
    6753             :         return false;
    6754             : }
    6755             : 
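                     : /*
                     :  * Synthesize fixed-refresh variants of the highest-refresh probed mode
                     :  * at a set of common video rates by stretching its vertical total,
                     :  * skipping rates outside the connector's min_vfreq..max_vfreq range and
                     :  * any resulting mode that already exists in the probed list.
                     :  */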
    6756           0 : static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
    6757             : {
    6758             :         const struct drm_display_mode *m;
    6759             :         struct drm_display_mode *new_mode;
    6760             :         uint i;
    6761           0 :         uint32_t new_modes_count = 0;
    6762             : 
    6763             :         /* Standard FPS values
    6764             :          *
    6765             :          * 23.976       - TV/NTSC
    6766             :          * 24           - Cinema
    6767             :          * 25           - TV/PAL
    6768             :          * 29.97        - TV/NTSC
    6769             :          * 30           - TV/NTSC
    6770             :          * 48           - Cinema HFR
    6771             :          * 50           - TV/PAL
    6772             :          * 60           - Commonly used
    6773             :          * 48,72,96,120 - Multiples of 24
    6774             :          */
    6775             :         static const uint32_t common_rates[] = {
    6776             :                 23976, 24000, 25000, 29970, 30000,
    6777             :                 48000, 50000, 60000, 72000, 96000, 120000
    6778             :         };
    6779             : 
    6780             :         /*
    6781             :          * Find mode with highest refresh rate with the same resolution
    6782             :          * as the preferred mode. Some monitors report a preferred mode
    6783             :          * with lower resolution than the highest refresh rate supported.
    6784             :          */
    6785             : 
    6786           0 :         m = get_highest_refresh_rate_mode(aconnector, true);
    6787           0 :         if (!m)
    6788             :                 return 0;
    6789             : 
    6790           0 :         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
    6791             :                 uint64_t target_vtotal, target_vtotal_diff;
    6792             :                 uint64_t num, den;
    6793             : 
    6794           0 :                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
    6795           0 :                         continue;
    6796             : 
    6797           0 :                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
    6798           0 :                     common_rates[i] > aconnector->max_vfreq * 1000)
    6799           0 :                         continue;
    6800             : 
    6801           0 :                 num = (unsigned long long)m->clock * 1000 * 1000;
    6802           0 :                 den = common_rates[i] * (unsigned long long)m->htotal;
    6803           0 :                 target_vtotal = div_u64(num, den);
    6804           0 :                 target_vtotal_diff = target_vtotal - m->vtotal;
    6805             : 
    6806             :                 /* Check for illegal modes */
    6807           0 :                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
    6808           0 :                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
    6809             :                     m->vtotal + target_vtotal_diff < m->vsync_end)
    6810           0 :                         continue;
    6811             : 
    6812           0 :                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
    6813           0 :                 if (!new_mode)
    6814             :                         goto out;
    6815             : 
    6816           0 :                 new_mode->vtotal += (u16)target_vtotal_diff;
    6817           0 :                 new_mode->vsync_start += (u16)target_vtotal_diff;
    6818           0 :                 new_mode->vsync_end += (u16)target_vtotal_diff;
    6819           0 :                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
    6820           0 :                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
    6821             : 
    6822           0 :                 if (!is_duplicate_mode(aconnector, new_mode)) {
    6823           0 :                         drm_mode_probed_add(&aconnector->base, new_mode);
    6824           0 :                         new_modes_count += 1;
    6825             :                 } else
    6826           0 :                         drm_mode_destroy(aconnector->base.dev, new_mode);
    6827             :         }
    6828             :  out:
    6829             :         return new_modes_count;
    6830             : }
    6831             : 
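                     : /*
                     :  * Only synthesize extra freesync modes when the connector reports a
                     :  * usable refresh range (max_vfreq - min_vfreq greater than 10).
                     :  */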
    6832           0 : static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
    6833             :                                                    struct edid *edid)
    6834             : {
    6835           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector =
    6836           0 :                 to_amdgpu_dm_connector(connector);
    6837             : 
    6838           0 :         if (!edid)
    6839             :                 return;
    6840             : 
    6841           0 :         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
    6842           0 :                 amdgpu_dm_connector->num_modes +=
    6843           0 :                         add_fs_modes(amdgpu_dm_connector);
    6844             : }
    6845             : 
    6846           0 : static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
    6847             : {
    6848           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector =
    6849           0 :                         to_amdgpu_dm_connector(connector);
    6850             :         struct drm_encoder *encoder;
    6851           0 :         struct edid *edid = amdgpu_dm_connector->edid;
    6852             : 
    6853           0 :         encoder = amdgpu_dm_connector_to_encoder(connector);
    6854             : 
    6855           0 :         if (!drm_edid_is_valid(edid)) {
    6856           0 :                 amdgpu_dm_connector->num_modes =
    6857           0 :                                 drm_add_modes_noedid(connector, 640, 480);
    6858             :         } else {
    6859           0 :                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
    6860           0 :                 amdgpu_dm_connector_add_common_modes(encoder, connector);
    6861           0 :                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
    6862             :         }
    6863           0 :         amdgpu_dm_fbc_init(connector);
    6864             : 
    6865           0 :         return amdgpu_dm_connector->num_modes;
    6866             : }
    6867             : 
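                     : /*
                     :  * Shared connector initialization: set the connector defaults, pick the
                     :  * HPD polling mode per connector type, and attach the scaling,
                     :  * underscan, max bpc, ABM, HDR metadata, VRR-capable and (when HDCP is
                     :  * built in) content-protection properties.
                     :  */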
    6868           0 : void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
    6869             :                                      struct amdgpu_dm_connector *aconnector,
    6870             :                                      int connector_type,
    6871             :                                      struct dc_link *link,
    6872             :                                      int link_index)
    6873             : {
    6874           0 :         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
    6875             : 
    6876             :         /*
    6877             :          * Some of the properties below require access to state, like bpc.
    6878             :          * Allocate some default initial connector state with our reset helper.
    6879             :          */
    6880           0 :         if (aconnector->base.funcs->reset)
    6881           0 :                 aconnector->base.funcs->reset(&aconnector->base);
    6882             : 
    6883           0 :         aconnector->connector_id = link_index;
    6884           0 :         aconnector->dc_link = link;
    6885           0 :         aconnector->base.interlace_allowed = false;
    6886           0 :         aconnector->base.doublescan_allowed = false;
    6887           0 :         aconnector->base.stereo_allowed = false;
    6888           0 :         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
    6889           0 :         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
    6890           0 :         aconnector->audio_inst = -1;
    6891           0 :         mutex_init(&aconnector->hpd_lock);
    6892             : 
    6893             :         /*
    6894             :          * Configure HPD hot-plug support: connector->polled defaults to 0,
    6895             :          * which means HPD hot plug is not supported.
    6896             :          */
    6897           0 :         switch (connector_type) {
    6898             :         case DRM_MODE_CONNECTOR_HDMIA:
    6899           0 :                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
    6900           0 :                 aconnector->base.ycbcr_420_allowed =
    6901           0 :                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
    6902           0 :                 break;
    6903             :         case DRM_MODE_CONNECTOR_DisplayPort:
    6904           0 :                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
    6905           0 :                 link->link_enc = link_enc_cfg_get_link_enc(link);
    6906           0 :                 ASSERT(link->link_enc);
    6907           0 :                 if (link->link_enc)
    6908           0 :                         aconnector->base.ycbcr_420_allowed =
    6909           0 :                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
    6910             :                 break;
    6911             :         case DRM_MODE_CONNECTOR_DVID:
    6912           0 :                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
    6913           0 :                 break;
    6914             :         default:
    6915             :                 break;
    6916             :         }
    6917             : 
    6918           0 :         drm_object_attach_property(&aconnector->base.base,
    6919           0 :                                 dm->ddev->mode_config.scaling_mode_property,
    6920             :                                 DRM_MODE_SCALE_NONE);
    6921             : 
    6922           0 :         drm_object_attach_property(&aconnector->base.base,
    6923             :                                 adev->mode_info.underscan_property,
    6924             :                                 UNDERSCAN_OFF);
    6925           0 :         drm_object_attach_property(&aconnector->base.base,
    6926             :                                 adev->mode_info.underscan_hborder_property,
    6927             :                                 0);
    6928           0 :         drm_object_attach_property(&aconnector->base.base,
    6929             :                                 adev->mode_info.underscan_vborder_property,
    6930             :                                 0);
    6931             : 
    6932           0 :         if (!aconnector->mst_port)
    6933           0 :                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
    6934             : 
    6935             :         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
    6936           0 :         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
    6937           0 :         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
    6938             : 
    6939           0 :         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
    6940           0 :             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
    6941           0 :                 drm_object_attach_property(&aconnector->base.base,
    6942             :                                 adev->mode_info.abm_level_property, 0);
    6943             :         }
    6944             : 
    6945           0 :         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
    6946           0 :             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
    6947           0 :             connector_type == DRM_MODE_CONNECTOR_eDP) {
    6948           0 :                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
    6949             : 
    6950           0 :                 if (!aconnector->mst_port)
    6951           0 :                         drm_connector_attach_vrr_capable_property(&aconnector->base);
    6952             : 
    6953             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    6954             :                 if (adev->dm.hdcp_workqueue)
    6955             :                         drm_connector_attach_content_protection_property(&aconnector->base, true);
    6956             : #endif
    6957             :         }
    6958           0 : }
    6959             : 
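                     : /*
                     :  * i2c_algorithm .master_xfer hook: translate the i2c_msg array into DC
                     :  * i2c payloads and submit them over the link's DDC service. Returns the
                     :  * number of messages on success, -EIO otherwise.
                     :  */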
    6960           0 : static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
    6961             :                               struct i2c_msg *msgs, int num)
    6962             : {
    6963           0 :         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
    6964           0 :         struct ddc_service *ddc_service = i2c->ddc_service;
    6965             :         struct i2c_command cmd;
    6966             :         int i;
    6967           0 :         int result = -EIO;
    6968             : 
    6969           0 :         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
    6970             : 
    6971           0 :         if (!cmd.payloads)
    6972             :                 return result;
    6973             : 
    6974           0 :         cmd.number_of_payloads = num;
    6975           0 :         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
    6976           0 :         cmd.speed = 100;
    6977             : 
    6978           0 :         for (i = 0; i < num; i++) {
    6979           0 :                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
    6980           0 :                 cmd.payloads[i].address = msgs[i].addr;
    6981           0 :                 cmd.payloads[i].length = msgs[i].len;
    6982           0 :                 cmd.payloads[i].data = msgs[i].buf;
    6983             :         }
    6984             : 
    6985           0 :         if (dc_submit_i2c(
    6986           0 :                         ddc_service->ctx->dc,
    6987           0 :                         ddc_service->link->link_index,
    6988             :                         &cmd))
    6989           0 :                 result = num;
    6990             : 
    6991           0 :         kfree(cmd.payloads);
    6992           0 :         return result;
    6993             : }
    6994             : 
    6995           0 : static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
    6996             : {
    6997           0 :         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
    6998             : }
    6999             : 
    7000             : static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
    7001             :         .master_xfer = amdgpu_dm_i2c_xfer,
    7002             :         .functionality = amdgpu_dm_i2c_func,
    7003             : };
    7004             : 
    7005             : static struct amdgpu_i2c_adapter *
    7006           0 : create_i2c(struct ddc_service *ddc_service,
    7007             :            int link_index,
    7008             :            int *res)
    7009             : {
    7010           0 :         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
    7011             :         struct amdgpu_i2c_adapter *i2c;
    7012             : 
    7013           0 :         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
    7014           0 :         if (!i2c)
    7015             :                 return NULL;
    7016           0 :         i2c->base.owner = THIS_MODULE;
    7017           0 :         i2c->base.class = I2C_CLASS_DDC;
    7018           0 :         i2c->base.dev.parent = &adev->pdev->dev;
    7019           0 :         i2c->base.algo = &amdgpu_dm_i2c_algo;
    7020           0 :         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
    7021           0 :         i2c_set_adapdata(&i2c->base, i2c);
    7022           0 :         i2c->ddc_service = ddc_service;
    7023             : 
    7024             :         return i2c;
    7025             : }
    7026             : 
    7027             : 
    7028             : /*
    7029             :  * Note: this function assumes that dc_link_detect() was called for the
    7030             :  * dc_link which will be represented by this aconnector.
    7031             :  */
    7032           0 : static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
    7033             :                                     struct amdgpu_dm_connector *aconnector,
    7034             :                                     uint32_t link_index,
    7035             :                                     struct amdgpu_encoder *aencoder)
    7036             : {
    7037           0 :         int res = 0;
    7038             :         int connector_type;
    7039           0 :         struct dc *dc = dm->dc;
    7040           0 :         struct dc_link *link = dc_get_link_at_index(dc, link_index);
    7041             :         struct amdgpu_i2c_adapter *i2c;
    7042             : 
    7043           0 :         link->priv = aconnector;
    7044             : 
    7045           0 :         DRM_DEBUG_DRIVER("%s()\n", __func__);
    7046             : 
    7047           0 :         i2c = create_i2c(link->ddc, link->link_index, &res);
    7048           0 :         if (!i2c) {
    7049           0 :                 DRM_ERROR("Failed to create i2c adapter data\n");
    7050           0 :                 return -ENOMEM;
    7051             :         }
    7052             : 
    7053           0 :         aconnector->i2c = i2c;
    7054           0 :         res = i2c_add_adapter(&i2c->base);
    7055             : 
    7056           0 :         if (res) {
    7057           0 :                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
    7058           0 :                 goto out_free;
    7059             :         }
    7060             : 
    7061           0 :         connector_type = to_drm_connector_type(link->connector_signal);
    7062             : 
    7063           0 :         res = drm_connector_init_with_ddc(
    7064             :                         dm->ddev,
    7065             :                         &aconnector->base,
    7066             :                         &amdgpu_dm_connector_funcs,
    7067             :                         connector_type,
    7068             :                         &i2c->base);
    7069             : 
    7070           0 :         if (res) {
    7071           0 :                 DRM_ERROR("connector_init failed\n");
    7072           0 :                 aconnector->connector_id = -1;
    7073           0 :                 goto out_free;
    7074             :         }
    7075             : 
    7076           0 :         drm_connector_helper_add(
    7077             :                         &aconnector->base,
    7078             :                         &amdgpu_dm_connector_helper_funcs);
    7079             : 
    7080           0 :         amdgpu_dm_connector_init_helper(
    7081             :                 dm,
    7082             :                 aconnector,
    7083             :                 connector_type,
    7084             :                 link,
    7085             :                 link_index);
    7086             : 
    7087           0 :         drm_connector_attach_encoder(
    7088             :                 &aconnector->base, &aencoder->base);
    7089             : 
    7090           0 :         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
    7091           0 :                 || connector_type == DRM_MODE_CONNECTOR_eDP)
    7092           0 :                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
    7093             : 
    7094             : out_free:
    7095           0 :         if (res) {
    7096           0 :                 kfree(i2c);
    7097           0 :                 aconnector->i2c = NULL;
    7098             :         }
    7099             :         return res;
    7100             : }
    7101             : 
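                     : /*
                     :  * Build the possible_crtcs bitmask for the encoders: one bit per CRTC,
                     :  * capped at six CRTCs (0x3f).
                     :  */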
    7102           0 : int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
    7103             : {
    7104           0 :         switch (adev->mode_info.num_crtc) {
    7105             :         case 1:
    7106             :                 return 0x1;
    7107             :         case 2:
    7108             :                 return 0x3;
    7109             :         case 3:
    7110             :                 return 0x7;
    7111             :         case 4:
    7112             :                 return 0xf;
    7113             :         case 5:
    7114             :                 return 0x1f;
    7115             :         case 6:
    7116             :         default:
    7117             :                 return 0x3f;
    7118             :         }
    7119             : }
    7120             : 
    7121           0 : static int amdgpu_dm_encoder_init(struct drm_device *dev,
    7122             :                                   struct amdgpu_encoder *aencoder,
    7123             :                                   uint32_t link_index)
    7124             : {
    7125           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    7126             : 
    7127           0 :         int res = drm_encoder_init(dev,
    7128             :                                    &aencoder->base,
    7129             :                                    &amdgpu_dm_encoder_funcs,
    7130             :                                    DRM_MODE_ENCODER_TMDS,
    7131             :                                    NULL);
    7132             : 
    7133           0 :         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
    7134             : 
    7135           0 :         if (!res)
    7136           0 :                 aencoder->encoder_id = link_index;
    7137             :         else
    7138           0 :                 aencoder->encoder_id = -1;
    7139             : 
    7140           0 :         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
    7141             : 
    7142           0 :         return res;
    7143             : }
    7144             : 
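                     : /*
                     :  * Enable or disable the per-CRTC interrupt sources (vblank, pageflip
                     :  * and, with CONFIG_DRM_AMD_SECURE_DISPLAY, vline0) as a CRTC is brought
                     :  * up or torn down.
                     :  */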
    7145           0 : static void manage_dm_interrupts(struct amdgpu_device *adev,
    7146             :                                  struct amdgpu_crtc *acrtc,
    7147             :                                  bool enable)
    7148             : {
    7149             :         /*
    7150             :          * We have no guarantee that the frontend index maps to the same
    7151             :          * backend index - some even map to more than one.
    7152             :          *
    7153             :          * TODO: Use a different interrupt or check DC itself for the mapping.
    7154             :          */
    7155           0 :         int irq_type =
    7156           0 :                 amdgpu_display_crtc_idx_to_irq_type(
    7157             :                         adev,
    7158             :                         acrtc->crtc_id);
    7159             : 
    7160           0 :         if (enable) {
    7161           0 :                 drm_crtc_vblank_on(&acrtc->base);
    7162           0 :                 amdgpu_irq_get(
    7163             :                         adev,
    7164             :                         &adev->pageflip_irq,
    7165             :                         irq_type);
    7166             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    7167             :                 amdgpu_irq_get(
    7168             :                         adev,
    7169             :                         &adev->vline0_irq,
    7170             :                         irq_type);
    7171             : #endif
    7172             :         } else {
    7173             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    7174             :                 amdgpu_irq_put(
    7175             :                         adev,
    7176             :                         &adev->vline0_irq,
    7177             :                         irq_type);
    7178             : #endif
    7179           0 :                 amdgpu_irq_put(
    7180             :                         adev,
    7181             :                         &adev->pageflip_irq,
    7182             :                         irq_type);
    7183           0 :                 drm_crtc_vblank_off(&acrtc->base);
    7184             :         }
    7185           0 : }
    7186             : 
    7187           0 : static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
    7188             :                                       struct amdgpu_crtc *acrtc)
    7189             : {
    7190           0 :         int irq_type =
    7191           0 :                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
    7192             : 
    7193             :         /**
    7194             :          * This reads the current state of the IRQ and forcibly reapplies
    7195             :          * the setting to hardware.
    7196             :          */
    7197           0 :         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
    7198           0 : }
    7199             : 
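                     : /*
                     :  * Report whether the scaling mode or the underscan enable/border
                     :  * settings differ between the old and new connector states.
                     :  */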
    7200             : static bool
    7201           0 : is_scaling_state_different(const struct dm_connector_state *dm_state,
    7202             :                            const struct dm_connector_state *old_dm_state)
    7203             : {
    7204           0 :         if (dm_state->scaling != old_dm_state->scaling)
    7205             :                 return true;
    7206           0 :         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
    7207           0 :                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
    7208             :                         return true;
    7209           0 :         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
    7210           0 :                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
    7211             :                         return true;
    7212           0 :         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
    7213             :                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
    7214             :                 return true;
    7215           0 :         return false;
    7216             : }
    7217             : 
    7218             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    7219             : static bool is_content_protection_different(struct drm_connector_state *state,
    7220             :                                             const struct drm_connector_state *old_state,
    7221             :                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
    7222             : {
    7223             :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    7224             :         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
    7225             : 
    7226             :         /* Handle: Type0/1 change */
    7227             :         if (old_state->hdcp_content_type != state->hdcp_content_type &&
    7228             :             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
    7229             :                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    7230             :                 return true;
    7231             :         }
    7232             : 
    7233             :         /* CP is being re-enabled, ignore this
    7234             :          *
    7235             :          * Handles:     ENABLED -> DESIRED
    7236             :          */
    7237             :         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
    7238             :             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
    7239             :                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
    7240             :                 return false;
    7241             :         }
    7242             : 
    7243             :         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
    7244             :          *
    7245             :          * Handles:     UNDESIRED -> ENABLED
    7246             :          */
    7247             :         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
    7248             :             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
    7249             :                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    7250             : 
    7251             :         /* Stream removed and re-enabled
    7252             :          *
    7253             :          * Can sometimes overlap with the HPD case,
    7254             :          * thus set update_hdcp to false to avoid
    7255             :          * setting HDCP multiple times.
    7256             :          *
    7257             :          * Handles:     DESIRED -> DESIRED (Special case)
    7258             :          */
    7259             :         if (!(old_state->crtc && old_state->crtc->enabled) &&
    7260             :                 state->crtc && state->crtc->enabled &&
    7261             :                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
    7262             :                 dm_con_state->update_hdcp = false;
    7263             :                 return true;
    7264             :         }
    7265             : 
    7266             :         /* Hot-plug, headless s3, dpms
    7267             :          *
    7268             :          * Only start HDCP if the display is connected/enabled.
    7269             :          * update_hdcp flag will be set to false until the next
    7270             :          * HPD comes in.
    7271             :          *
    7272             :          * Handles:     DESIRED -> DESIRED (Special case)
    7273             :          */
    7274             :         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
    7275             :             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
    7276             :                 dm_con_state->update_hdcp = false;
    7277             :                 return true;
    7278             :         }
    7279             : 
    7280             :         /*
    7281             :          * Handles:     UNDESIRED -> UNDESIRED
    7282             :          *              DESIRED -> DESIRED
    7283             :          *              ENABLED -> ENABLED
    7284             :          */
    7285             :         if (old_state->content_protection == state->content_protection)
    7286             :                 return false;
    7287             : 
    7288             :         /*
    7289             :          * Handles:     UNDESIRED -> DESIRED
    7290             :          *              DESIRED -> UNDESIRED
    7291             :          *              ENABLED -> UNDESIRED
    7292             :          */
    7293             :         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
    7294             :                 return true;
    7295             : 
    7296             :         /*
    7297             :          * Handles:     DESIRED -> ENABLED
    7298             :          */
    7299             :         return false;
    7300             : }
    7301             : 
    7302             : #endif
    7303             : static void remove_stream(struct amdgpu_device *adev,
    7304             :                           struct amdgpu_crtc *acrtc,
    7305             :                           struct dc_stream_state *stream)
    7306             : {
    7307             :         /* this is the update mode case */
    7308             : 
    7309           0 :         acrtc->otg_inst = -1;
    7310           0 :         acrtc->enabled = false;
    7311             : }
    7312             : 
    7313           0 : static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
    7314             : {
    7315             : 
    7316             :         assert_spin_locked(&acrtc->base.dev->event_lock);
    7317           0 :         WARN_ON(acrtc->event);
    7318             : 
    7319           0 :         acrtc->event = acrtc->base.state->event;
    7320             : 
    7321             :         /* Set the flip status */
    7322           0 :         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
    7323             : 
    7324             :         /* Mark this event as consumed */
    7325           0 :         acrtc->base.state->event = NULL;
    7326             : 
    7327             :         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
    7328             :                      acrtc->crtc_id);
    7329           0 : }
    7330             : 
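                     : /*
                     :  * Refresh the stream's VRR parameters and rebuild its VRR infopacket,
                     :  * using the flip timestamp when a surface is provided, and note whether
                     :  * the timing adjustment or the infopacket contents changed.
                     :  */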
    7331           0 : static void update_freesync_state_on_stream(
    7332             :         struct amdgpu_display_manager *dm,
    7333             :         struct dm_crtc_state *new_crtc_state,
    7334             :         struct dc_stream_state *new_stream,
    7335             :         struct dc_plane_state *surface,
    7336             :         u32 flip_timestamp_in_us)
    7337             : {
    7338             :         struct mod_vrr_params vrr_params;
    7339           0 :         struct dc_info_packet vrr_infopacket = {0};
    7340           0 :         struct amdgpu_device *adev = dm->adev;
    7341           0 :         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
    7342             :         unsigned long flags;
    7343           0 :         bool pack_sdp_v1_3 = false;
    7344             : 
    7345           0 :         if (!new_stream)
    7346           0 :                 return;
    7347             : 
    7348             :         /*
    7349             :          * TODO: Determine why min/max totals and vrefresh can be 0 here.
    7350             :          * For now it's sufficient to just guard against these conditions.
    7351             :          */
    7352             : 
    7353           0 :         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
    7354             :                 return;
    7355             : 
    7356           0 :         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    7357           0 :         vrr_params = acrtc->dm_irq_params.vrr_params;
    7358             : 
    7359           0 :         if (surface) {
    7360           0 :                 mod_freesync_handle_preflip(
    7361             :                         dm->freesync_module,
    7362             :                         surface,
    7363             :                         new_stream,
    7364             :                         flip_timestamp_in_us,
    7365             :                         &vrr_params);
    7366             : 
    7367           0 :                 if (adev->family < AMDGPU_FAMILY_AI &&
    7368           0 :                     amdgpu_dm_vrr_active(new_crtc_state)) {
    7369           0 :                         mod_freesync_handle_v_update(dm->freesync_module,
    7370             :                                                      new_stream, &vrr_params);
    7371             : 
    7372             :                         /* Need to call this before the frame ends. */
    7373           0 :                         dc_stream_adjust_vmin_vmax(dm->dc,
    7374             :                                                    new_crtc_state->stream,
    7375             :                                                    &vrr_params.adjust);
    7376             :                 }
    7377             :         }
    7378             : 
    7379           0 :         mod_freesync_build_vrr_infopacket(
    7380             :                 dm->freesync_module,
    7381             :                 new_stream,
    7382             :                 &vrr_params,
    7383             :                 PACKET_TYPE_VRR,
    7384             :                 TRANSFER_FUNC_UNKNOWN,
    7385             :                 &vrr_infopacket,
    7386             :                 pack_sdp_v1_3);
    7387             : 
    7388           0 :         new_crtc_state->freesync_timing_changed |=
    7389           0 :                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
    7390             :                         &vrr_params.adjust,
    7391           0 :                         sizeof(vrr_params.adjust)) != 0);
    7392             : 
    7393           0 :         new_crtc_state->freesync_vrr_info_changed |=
    7394           0 :                 (memcmp(&new_crtc_state->vrr_infopacket,
    7395             :                         &vrr_infopacket,
    7396           0 :                         sizeof(vrr_infopacket)) != 0);
    7397             : 
    7398           0 :         acrtc->dm_irq_params.vrr_params = vrr_params;
    7399           0 :         new_crtc_state->vrr_infopacket = vrr_infopacket;
    7400             : 
    7401           0 :         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
    7402           0 :         new_stream->vrr_infopacket = vrr_infopacket;
    7403             : 
    7404           0 :         if (new_crtc_state->freesync_vrr_info_changed)
    7405           0 :                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
    7406             :                               new_crtc_state->base.crtc->base.id,
    7407             :                               (int)new_crtc_state->base.vrr_enabled,
    7408             :                               (int)vrr_params.state);
    7409             : 
    7410           0 :         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
    7411             : }
    7412             : 
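                     : /*
                     :  * Translate the new CRTC state's freesync configuration into
                     :  * mod_vrr_params and copy the result into the CRTC's dm_irq_params so
                     :  * the interrupt handlers see a consistent snapshot.
                     :  */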
    7413           0 : static void update_stream_irq_parameters(
    7414             :         struct amdgpu_display_manager *dm,
    7415             :         struct dm_crtc_state *new_crtc_state)
    7416             : {
    7417           0 :         struct dc_stream_state *new_stream = new_crtc_state->stream;
    7418             :         struct mod_vrr_params vrr_params;
    7419           0 :         struct mod_freesync_config config = new_crtc_state->freesync_config;
    7420           0 :         struct amdgpu_device *adev = dm->adev;
    7421           0 :         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
    7422             :         unsigned long flags;
    7423             : 
    7424           0 :         if (!new_stream)
    7425           0 :                 return;
    7426             : 
    7427             :         /*
    7428             :          * TODO: Determine why min/max totals and vrefresh can be 0 here.
    7429             :          * For now it's sufficient to just guard against these conditions.
    7430             :          */
    7431           0 :         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
    7432             :                 return;
    7433             : 
    7434           0 :         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    7435           0 :         vrr_params = acrtc->dm_irq_params.vrr_params;
    7436             : 
    7437           0 :         if (new_crtc_state->vrr_supported &&
    7438           0 :             config.min_refresh_in_uhz &&
    7439           0 :             config.max_refresh_in_uhz) {
    7440             :                 /*
    7441             :                  * if freesync compatible mode was set, config.state will be set
    7442             :                  * in atomic check
    7443             :                  */
    7444           0 :                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
    7445           0 :                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
    7446           0 :                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
    7447           0 :                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
    7448           0 :                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
    7449           0 :                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
    7450           0 :                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
    7451             :                 } else {
    7452           0 :                         config.state = new_crtc_state->base.vrr_enabled ?
    7453           0 :                                                      VRR_STATE_ACTIVE_VARIABLE :
    7454             :                                                      VRR_STATE_INACTIVE;
    7455             :                 }
    7456             :         } else {
    7457           0 :                 config.state = VRR_STATE_UNSUPPORTED;
    7458             :         }
    7459             : 
    7460           0 :         mod_freesync_build_vrr_params(dm->freesync_module,
    7461             :                                       new_stream,
    7462             :                                       &config, &vrr_params);
    7463             : 
    7464           0 :         new_crtc_state->freesync_timing_changed |=
    7465           0 :                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
    7466           0 :                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
    7467             : 
    7468           0 :         new_crtc_state->freesync_config = config;
    7469             :         /* Copy state for access from DM IRQ handler */
    7470           0 :         acrtc->dm_irq_params.freesync_config = config;
    7471           0 :         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
    7472           0 :         acrtc->dm_irq_params.vrr_params = vrr_params;
    7473           0 :         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
    7474             : }
    7475             : 
    7476           0 : static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
    7477             :                                             struct dm_crtc_state *new_state)
    7478             : {
    7479           0 :         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
    7480           0 :         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
    7481             : 
    7482           0 :         if (!old_vrr_active && new_vrr_active) {
    7483             :                 /* Transition VRR inactive -> active:
    7484             :                  * While VRR is active, we must not disable vblank irq, as a
     7485             :                  * re-enable after a disable would compute bogus vblank/pflip
     7486             :                  * timestamps, as it likely happens inside the display front porch.
     7487             :                  *
     7488             :                  * We also need the vupdate irq for the actual core vblank
     7489             :                  * handling at the end of vblank.
    7490             :                  */
    7491           0 :                 dm_set_vupdate_irq(new_state->base.crtc, true);
    7492           0 :                 drm_crtc_vblank_get(new_state->base.crtc);
    7493           0 :                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
    7494             :                                  __func__, new_state->base.crtc->base.id);
    7495           0 :         } else if (old_vrr_active && !new_vrr_active) {
    7496             :                 /* Transition VRR active -> inactive:
    7497             :                  * Allow vblank irq disable again for fixed refresh rate.
    7498             :                  */
    7499           0 :                 dm_set_vupdate_irq(new_state->base.crtc, false);
    7500           0 :                 drm_crtc_vblank_put(new_state->base.crtc);
    7501           0 :                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
    7502             :                                  __func__, new_state->base.crtc->base.id);
    7503             :         }
    7504           0 : }
    7505             : 
    7506           0 : static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
    7507             : {
    7508             :         struct drm_plane *plane;
    7509             :         struct drm_plane_state *old_plane_state;
    7510             :         int i;
    7511             : 
    7512             :         /*
    7513             :          * TODO: Make this per-stream so we don't issue redundant updates for
    7514             :          * commits with multiple streams.
    7515             :          */
    7516           0 :         for_each_old_plane_in_state(state, plane, old_plane_state, i)
    7517           0 :                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
    7518           0 :                         handle_cursor_update(plane, old_plane_state);
    7519           0 : }
    7520             : 
    7521           0 : static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
    7522             :                                     struct dc_state *dc_state,
    7523             :                                     struct drm_device *dev,
    7524             :                                     struct amdgpu_display_manager *dm,
    7525             :                                     struct drm_crtc *pcrtc,
    7526             :                                     bool wait_for_vblank)
    7527             : {
    7528             :         uint32_t i;
    7529             :         uint64_t timestamp_ns;
    7530             :         struct drm_plane *plane;
    7531             :         struct drm_plane_state *old_plane_state, *new_plane_state;
    7532           0 :         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
    7533           0 :         struct drm_crtc_state *new_pcrtc_state =
    7534             :                         drm_atomic_get_new_crtc_state(state, pcrtc);
    7535           0 :         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
    7536           0 :         struct dm_crtc_state *dm_old_crtc_state =
    7537           0 :                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
    7538           0 :         int planes_count = 0, vpos, hpos;
    7539             :         long r;
    7540             :         unsigned long flags;
    7541             :         struct amdgpu_bo *abo;
    7542             :         uint32_t target_vblank, last_flip_vblank;
    7543           0 :         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
    7544           0 :         bool cursor_update = false;
    7545           0 :         bool pflip_present = false;
    7546             :         struct {
    7547             :                 struct dc_surface_update surface_updates[MAX_SURFACES];
    7548             :                 struct dc_plane_info plane_infos[MAX_SURFACES];
    7549             :                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
    7550             :                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
    7551             :                 struct dc_stream_update stream_update;
    7552             :         } *bundle;
    7553             : 
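                      :         /*
                      :          * The update bundle holds several MAX_SURFACES-sized arrays,
                      :          * so it is allocated on the heap rather than on the stack.
                      :          */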
    7554           0 :         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
    7555             : 
    7556           0 :         if (!bundle) {
    7557           0 :                 dm_error("Failed to allocate update bundle\n");
    7558           0 :                 goto cleanup;
    7559             :         }
    7560             : 
    7561             :         /*
    7562             :          * Disable the cursor first if we're disabling all the planes.
    7563             :          * It'll remain on the screen after the planes are re-enabled
    7564             :          * if we don't.
    7565             :          */
    7566           0 :         if (acrtc_state->active_planes == 0)
    7567           0 :                 amdgpu_dm_commit_cursors(state);
    7568             : 
    7569             :         /* update planes when needed */
    7570           0 :         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
    7571           0 :                 struct drm_crtc *crtc = new_plane_state->crtc;
    7572             :                 struct drm_crtc_state *new_crtc_state;
    7573           0 :                 struct drm_framebuffer *fb = new_plane_state->fb;
    7574           0 :                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
    7575             :                 bool plane_needs_flip;
    7576             :                 struct dc_plane_state *dc_plane;
    7577           0 :                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
    7578             : 
    7579             :                 /* Cursor plane is handled after stream updates */
    7580           0 :                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
    7581           0 :                         if ((fb && crtc == pcrtc) ||
    7582           0 :                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
    7583           0 :                                 cursor_update = true;
    7584             : 
    7585           0 :                         continue;
    7586             :                 }
    7587             : 
    7588           0 :                 if (!fb || !crtc || pcrtc != crtc)
    7589           0 :                         continue;
    7590             : 
    7591           0 :                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
    7592           0 :                 if (!new_crtc_state->active)
    7593           0 :                         continue;
    7594             : 
    7595           0 :                 dc_plane = dm_new_plane_state->dc_state;
    7596             : 
    7597           0 :                 bundle->surface_updates[planes_count].surface = dc_plane;
    7598           0 :                 if (new_pcrtc_state->color_mgmt_changed) {
    7599           0 :                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
    7600           0 :                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
    7601           0 :                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
    7602             :                 }
    7603             : 
    7604           0 :                 fill_dc_scaling_info(dm->adev, new_plane_state,
    7605             :                                      &bundle->scaling_infos[planes_count]);
    7606             : 
    7607           0 :                 bundle->surface_updates[planes_count].scaling_info =
    7608             :                         &bundle->scaling_infos[planes_count];
    7609             : 
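                      :                 /*
                      :                  * A flip is only needed when both the old and the new
                      :                  * plane state have a framebuffer; a plane that is just
                      :                  * being enabled has nothing to flip from.
                      :                  */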
    7610           0 :                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
    7611             : 
    7612           0 :                 pflip_present = pflip_present || plane_needs_flip;
    7613             : 
    7614           0 :                 if (!plane_needs_flip) {
    7615           0 :                         planes_count += 1;
    7616           0 :                         continue;
    7617             :                 }
    7618             : 
    7619           0 :                 abo = gem_to_amdgpu_bo(fb->obj[0]);
    7620             : 
    7621             :                 /*
     7622             :                  * Wait for all fences on this FB. Do a limited wait to avoid
     7623             :                  * deadlock during GPU reset when this fence will not signal
     7624             :                  * but we hold the reservation lock for the BO.
    7625             :                  */
    7626           0 :                 r = dma_resv_wait_timeout(abo->tbo.base.resv,
    7627             :                                           DMA_RESV_USAGE_WRITE, false,
    7628             :                                           msecs_to_jiffies(5000));
    7629           0 :                 if (unlikely(r <= 0))
    7630           0 :                         DRM_ERROR("Waiting for fences timed out!");
    7631             : 
    7632           0 :                 fill_dc_plane_info_and_addr(
    7633             :                         dm->adev, new_plane_state,
    7634             :                         afb->tiling_flags,
    7635             :                         &bundle->plane_infos[planes_count],
    7636             :                         &bundle->flip_addrs[planes_count].address,
    7637           0 :                         afb->tmz_surface, false);
    7638             : 
    7639           0 :                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
    7640             :                                  new_plane_state->plane->index,
    7641             :                                  bundle->plane_infos[planes_count].dcc.enable);
    7642             : 
    7643           0 :                 bundle->surface_updates[planes_count].plane_info =
    7644             :                         &bundle->plane_infos[planes_count];
    7645             : 
    7646           0 :                 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
    7647             :                                     new_crtc_state,
    7648             :                                     &bundle->flip_addrs[planes_count]);
    7649             : 
    7650             :                 /*
    7651             :                  * Only allow immediate flips for fast updates that don't
     7652             :                  * change FB pitch, DCC state, rotation or mirroring.
    7653             :                  */
    7654           0 :                 bundle->flip_addrs[planes_count].flip_immediate =
    7655           0 :                         crtc->state->async_flip &&
    7656           0 :                         acrtc_state->update_type == UPDATE_TYPE_FAST;
    7657             : 
    7658           0 :                 timestamp_ns = ktime_get_ns();
    7659           0 :                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
    7660           0 :                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
    7661           0 :                 bundle->surface_updates[planes_count].surface = dc_plane;
    7662             : 
    7663           0 :                 if (!bundle->surface_updates[planes_count].surface) {
    7664           0 :                         DRM_ERROR("No surface for CRTC: id=%d\n",
    7665             :                                         acrtc_attach->crtc_id);
    7666           0 :                         continue;
    7667             :                 }
    7668             : 
    7669           0 :                 if (plane == pcrtc->primary)
    7670           0 :                         update_freesync_state_on_stream(
    7671             :                                 dm,
    7672             :                                 acrtc_state,
    7673             :                                 acrtc_state->stream,
    7674             :                                 dc_plane,
    7675             :                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
    7676             : 
    7677           0 :                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
    7678             :                                  __func__,
    7679             :                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
    7680             :                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
    7681             : 
    7682           0 :                 planes_count += 1;
    7683             : 
    7684             :         }
    7685             : 
    7686           0 :         if (pflip_present) {
    7687           0 :                 if (!vrr_active) {
    7688             :                         /* Use old throttling in non-vrr fixed refresh rate mode
    7689             :                          * to keep flip scheduling based on target vblank counts
    7690             :                          * working in a backwards compatible way, e.g., for
    7691             :                          * clients using the GLX_OML_sync_control extension or
    7692             :                          * DRI3/Present extension with defined target_msc.
    7693             :                          */
    7694           0 :                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
    7695             :                 }
    7696             :                 else {
    7697             :                         /* For variable refresh rate mode only:
     7698             :                          * Get the vblank of the last completed flip to avoid
     7699             :                          * more than one vrr flip per video frame via throttling,
     7700             :                          * but allow flip programming anywhere in the possibly
     7701             :                          * large variable vrr vblank interval, for fine-grained
     7702             :                          * flip timing control and more opportunity to avoid
     7703             :                          * stutter on late submission of flips.
    7704             :                          */
    7705           0 :                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
    7706           0 :                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
    7707           0 :                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
    7708             :                 }
    7709             : 
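                      :                 /*
                      :                  * wait_for_vblank is a bool, so this adds at most one
                      :                  * vblank of flip throttling.
                      :                  */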
    7710           0 :                 target_vblank = last_flip_vblank + wait_for_vblank;
    7711             : 
    7712             :                 /*
    7713             :                  * Wait until we're out of the vertical blank period before the one
    7714             :                  * targeted by the flip
    7715             :                  */
    7716           0 :                 while ((acrtc_attach->enabled &&
    7717           0 :                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
    7718             :                                                             0, &vpos, &hpos, NULL,
    7719           0 :                                                             NULL, &pcrtc->hwmode)
    7720           0 :                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
    7721           0 :                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
    7722           0 :                         (int)(target_vblank -
    7723           0 :                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
    7724             :                         usleep_range(1000, 1100);
    7725             :                 }
    7726             : 
    7727             :                 /**
    7728             :                  * Prepare the flip event for the pageflip interrupt to handle.
    7729             :                  *
    7730             :                  * This only works in the case where we've already turned on the
     7731             :                  * appropriate hardware blocks (e.g. HUBP), so in the transition
     7732             :                  * from 0 -> n planes we have to skip the hardware-generated event
    7733             :                  * and rely on sending it from software.
    7734             :                  */
    7735           0 :                 if (acrtc_attach->base.state->event &&
    7736           0 :                     acrtc_state->active_planes > 0) {
    7737           0 :                         drm_crtc_vblank_get(pcrtc);
    7738             : 
    7739           0 :                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
    7740             : 
    7741           0 :                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
    7742           0 :                         prepare_flip_isr(acrtc_attach);
    7743             : 
    7744           0 :                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
    7745             :                 }
    7746             : 
    7747           0 :                 if (acrtc_state->stream) {
    7748           0 :                         if (acrtc_state->freesync_vrr_info_changed)
    7749           0 :                                 bundle->stream_update.vrr_infopacket =
    7750           0 :                                         &acrtc_state->stream->vrr_infopacket;
    7751             :                 }
    7752           0 :         } else if (cursor_update && acrtc_state->active_planes > 0 &&
    7753           0 :                    acrtc_attach->base.state->event) {
    7754           0 :                 drm_crtc_vblank_get(pcrtc);
    7755             : 
    7756           0 :                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
    7757             : 
    7758           0 :                 acrtc_attach->event = acrtc_attach->base.state->event;
    7759           0 :                 acrtc_attach->base.state->event = NULL;
    7760             : 
    7761           0 :                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
    7762             :         }
    7763             : 
    7764             :         /* Update the planes if changed or disable if we don't have any. */
    7765           0 :         if ((planes_count || acrtc_state->active_planes == 0) &&
    7766           0 :                 acrtc_state->stream) {
    7767             :                 /*
    7768             :                  * If PSR or idle optimizations are enabled then flush out
    7769             :                  * any pending work before hardware programming.
    7770             :                  */
    7771           0 :                 if (dm->vblank_control_workqueue)
    7772           0 :                         flush_workqueue(dm->vblank_control_workqueue);
    7773             : 
    7774           0 :                 bundle->stream_update.stream = acrtc_state->stream;
    7775           0 :                 if (new_pcrtc_state->mode_changed) {
    7776           0 :                         bundle->stream_update.src = acrtc_state->stream->src;
    7777           0 :                         bundle->stream_update.dst = acrtc_state->stream->dst;
    7778             :                 }
    7779             : 
    7780           0 :                 if (new_pcrtc_state->color_mgmt_changed) {
    7781             :                         /*
    7782             :                          * TODO: This isn't fully correct since we've actually
    7783             :                          * already modified the stream in place.
    7784             :                          */
    7785           0 :                         bundle->stream_update.gamut_remap =
    7786           0 :                                 &acrtc_state->stream->gamut_remap_matrix;
    7787           0 :                         bundle->stream_update.output_csc_transform =
    7788           0 :                                 &acrtc_state->stream->csc_color_matrix;
    7789           0 :                         bundle->stream_update.out_transfer_func =
    7790           0 :                                 acrtc_state->stream->out_transfer_func;
    7791             :                 }
    7792             : 
    7793           0 :                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
    7794           0 :                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
    7795           0 :                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
    7796             : 
    7797             :                 /*
    7798             :                  * If FreeSync state on the stream has changed then we need to
    7799             :                  * re-adjust the min/max bounds now that DC doesn't handle this
    7800             :                  * as part of commit.
    7801             :                  */
    7802           0 :                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
    7803           0 :                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
    7804           0 :                         dc_stream_adjust_vmin_vmax(
    7805             :                                 dm->dc, acrtc_state->stream,
    7806             :                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
    7807           0 :                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
    7808             :                 }
    7809           0 :                 mutex_lock(&dm->dc_lock);
    7810           0 :                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
    7811           0 :                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
    7812           0 :                         amdgpu_dm_psr_disable(acrtc_state->stream);
    7813             : 
    7814           0 :                 dc_commit_updates_for_stream(dm->dc,
    7815           0 :                                                      bundle->surface_updates,
    7816             :                                                      planes_count,
    7817             :                                                      acrtc_state->stream,
    7818             :                                                      &bundle->stream_update,
    7819             :                                                      dc_state);
    7820             : 
    7821             :                 /**
    7822             :                  * Enable or disable the interrupts on the backend.
    7823             :                  *
    7824             :                  * Most pipes are put into power gating when unused.
    7825             :                  *
    7826             :                  * When power gating is enabled on a pipe we lose the
    7827             :                  * interrupt enablement state when power gating is disabled.
    7828             :                  *
    7829             :                  * So we need to update the IRQ control state in hardware
    7830             :                  * whenever the pipe turns on (since it could be previously
    7831             :                  * power gated) or off (since some pipes can't be power gated
    7832             :                  * on some ASICs).
    7833             :                  */
    7834           0 :                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
    7835           0 :                         dm_update_pflip_irq_state(drm_to_adev(dev),
    7836             :                                                   acrtc_attach);
    7837             : 
    7838           0 :                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
    7839           0 :                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
    7840           0 :                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
    7841           0 :                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
    7842             : 
    7843             :                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
    7844           0 :                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
    7845           0 :                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
    7846           0 :                         struct amdgpu_dm_connector *aconn =
    7847             :                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
    7848             : 
    7849           0 :                         if (aconn->psr_skip_count > 0)
    7850           0 :                                 aconn->psr_skip_count--;
    7851             : 
    7852             :                         /* Allow PSR when skip count is 0. */
    7853           0 :                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
    7854             : 
    7855             :                         /*
    7856             :                          * If sink supports PSR SU, there is no need to rely on
    7857             :                          * a vblank event disable request to enable PSR. PSR SU
    7858             :                          * can be enabled immediately once OS demonstrates an
    7859             :                          * adequate number of fast atomic commits to notify KMD
    7860             :                          * of update events. See `vblank_control_worker()`.
    7861             :                          */
    7862           0 :                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
    7863           0 :                             acrtc_attach->dm_irq_params.allow_psr_entry &&
    7864           0 :                             !acrtc_state->stream->link->psr_settings.psr_allow_active)
    7865           0 :                                 amdgpu_dm_psr_enable(acrtc_state->stream);
    7866             :                 } else {
    7867           0 :                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
    7868             :                 }
    7869             : 
    7870           0 :                 mutex_unlock(&dm->dc_lock);
    7871             :         }
    7872             : 
    7873             :         /*
    7874             :          * Update cursor state *after* programming all the planes.
    7875             :          * This avoids redundant programming in the case where we're going
    7876             :          * to be disabling a single plane - those pipes are being disabled.
    7877             :          */
    7878           0 :         if (acrtc_state->active_planes)
    7879           0 :                 amdgpu_dm_commit_cursors(state);
    7880             : 
    7881             : cleanup:
    7882           0 :         kfree(bundle);
    7883           0 : }
    7884             : 
    7885           0 : static void amdgpu_dm_commit_audio(struct drm_device *dev,
    7886             :                                    struct drm_atomic_state *state)
    7887             : {
    7888           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    7889             :         struct amdgpu_dm_connector *aconnector;
    7890             :         struct drm_connector *connector;
    7891             :         struct drm_connector_state *old_con_state, *new_con_state;
    7892             :         struct drm_crtc_state *new_crtc_state;
    7893             :         struct dm_crtc_state *new_dm_crtc_state;
    7894             :         const struct dc_stream_status *status;
    7895             :         int i, inst;
    7896             : 
    7897             :         /* Notify device removals. */
    7898           0 :         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
    7899           0 :                 if (old_con_state->crtc != new_con_state->crtc) {
    7900             :                         /* CRTC changes require notification. */
    7901             :                         goto notify;
    7902             :                 }
    7903             : 
    7904           0 :                 if (!new_con_state->crtc)
    7905           0 :                         continue;
    7906             : 
    7907           0 :                 new_crtc_state = drm_atomic_get_new_crtc_state(
    7908             :                         state, new_con_state->crtc);
    7909             : 
    7910           0 :                 if (!new_crtc_state)
    7911           0 :                         continue;
    7912             : 
    7913           0 :                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
    7914           0 :                         continue;
    7915             : 
    7916             :         notify:
    7917           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    7918             : 
    7919           0 :                 mutex_lock(&adev->dm.audio_lock);
    7920           0 :                 inst = aconnector->audio_inst;
    7921           0 :                 aconnector->audio_inst = -1;
    7922           0 :                 mutex_unlock(&adev->dm.audio_lock);
    7923             : 
    7924           0 :                 amdgpu_dm_audio_eld_notify(adev, inst);
    7925             :         }
    7926             : 
    7927             :         /* Notify audio device additions. */
    7928           0 :         for_each_new_connector_in_state(state, connector, new_con_state, i) {
    7929           0 :                 if (!new_con_state->crtc)
    7930           0 :                         continue;
    7931             : 
    7932           0 :                 new_crtc_state = drm_atomic_get_new_crtc_state(
    7933             :                         state, new_con_state->crtc);
    7934             : 
    7935           0 :                 if (!new_crtc_state)
    7936           0 :                         continue;
    7937             : 
    7938           0 :                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
    7939           0 :                         continue;
    7940             : 
    7941           0 :                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
    7942           0 :                 if (!new_dm_crtc_state->stream)
    7943           0 :                         continue;
    7944             : 
    7945           0 :                 status = dc_stream_get_status(new_dm_crtc_state->stream);
    7946           0 :                 if (!status)
    7947           0 :                         continue;
    7948             : 
    7949           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    7950             : 
    7951           0 :                 mutex_lock(&adev->dm.audio_lock);
    7952           0 :                 inst = status->audio_inst;
    7953           0 :                 aconnector->audio_inst = inst;
    7954           0 :                 mutex_unlock(&adev->dm.audio_lock);
    7955             : 
    7956           0 :                 amdgpu_dm_audio_eld_notify(adev, inst);
    7957             :         }
    7958           0 : }
    7959             : 
    7960             : /*
    7961             :  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
    7962             :  * @crtc_state: the DRM CRTC state
    7963             :  * @stream_state: the DC stream state.
    7964             :  *
     7965             :  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
    7966             :  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
    7967             :  */
    7968             : static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
    7969             :                                                 struct dc_stream_state *stream_state)
    7970             : {
    7971           0 :         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
    7972             : }
    7973             : 
    7974             : /**
    7975             :  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
    7976             :  * @state: The atomic state to commit
    7977             :  *
    7978             :  * This will tell DC to commit the constructed DC state from atomic_check,
    7979             :  * programming the hardware. Any failures here implies a hardware failure, since
     7980             :  * programming the hardware. Any failure here implies a hardware failure, since
    7981             :  */
    7982           0 : static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
    7983             : {
    7984           0 :         struct drm_device *dev = state->dev;
    7985           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    7986           0 :         struct amdgpu_display_manager *dm = &adev->dm;
    7987             :         struct dm_atomic_state *dm_state;
    7988           0 :         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
    7989             :         uint32_t i, j;
    7990             :         struct drm_crtc *crtc;
    7991             :         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
    7992             :         unsigned long flags;
    7993           0 :         bool wait_for_vblank = true;
    7994             :         struct drm_connector *connector;
    7995             :         struct drm_connector_state *old_con_state, *new_con_state;
    7996             :         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
    7997           0 :         int crtc_disable_count = 0;
    7998           0 :         bool mode_set_reset_required = false;
    7999             : 
    8000           0 :         trace_amdgpu_dm_atomic_commit_tail_begin(state);
    8001             : 
    8002           0 :         drm_atomic_helper_update_legacy_modeset_state(dev, state);
    8003             : 
    8004           0 :         dm_state = dm_atomic_get_new_state(state);
    8005           0 :         if (dm_state && dm_state->context) {
    8006             :                 dc_state = dm_state->context;
    8007             :         } else {
    8008             :                 /* No state changes, retain current state. */
    8009           0 :                 dc_state_temp = dc_create_state(dm->dc);
    8010           0 :                 ASSERT(dc_state_temp);
    8011           0 :                 dc_state = dc_state_temp;
    8012           0 :                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
    8013             :         }
    8014             : 
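                      :         /*
                      :          * Disable interrupts and release the old stream for every CRTC
                      :          * that is being turned off or is going through a modeset.
                      :          */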
    8015           0 :         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
    8016             :                                        new_crtc_state, i) {
    8017           0 :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
    8018             : 
    8019           0 :                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    8020             : 
    8021           0 :                 if (old_crtc_state->active &&
    8022           0 :                     (!new_crtc_state->active ||
    8023           0 :                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
    8024           0 :                         manage_dm_interrupts(adev, acrtc, false);
    8025           0 :                         dc_stream_release(dm_old_crtc_state->stream);
    8026             :                 }
    8027             :         }
    8028             : 
    8029           0 :         drm_atomic_helper_calc_timestamping_constants(state);
    8030             : 
    8031             :         /* update changed items */
    8032           0 :         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
    8033           0 :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
    8034             : 
    8035           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8036           0 :                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    8037             : 
    8038           0 :                 drm_dbg_state(state->dev,
    8039             :                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
     8040             :                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
    8041             :                         "connectors_changed:%d\n",
    8042             :                         acrtc->crtc_id,
    8043             :                         new_crtc_state->enable,
    8044             :                         new_crtc_state->active,
    8045             :                         new_crtc_state->planes_changed,
    8046             :                         new_crtc_state->mode_changed,
    8047             :                         new_crtc_state->active_changed,
    8048             :                         new_crtc_state->connectors_changed);
    8049             : 
    8050             :                 /* Disable cursor if disabling crtc */
    8051           0 :                 if (old_crtc_state->active && !new_crtc_state->active) {
    8052             :                         struct dc_cursor_position position;
    8053             : 
    8054           0 :                         memset(&position, 0, sizeof(position));
    8055           0 :                         mutex_lock(&dm->dc_lock);
    8056           0 :                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
    8057           0 :                         mutex_unlock(&dm->dc_lock);
    8058             :                 }
    8059             : 
    8060             :                 /* Copy all transient state flags into dc state */
    8061           0 :                 if (dm_new_crtc_state->stream) {
    8062           0 :                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
    8063             :                                                             dm_new_crtc_state->stream);
    8064             :                 }
    8065             : 
    8066             :                 /* handles headless hotplug case, updating new_state and
    8067             :                  * aconnector as needed
    8068             :                  */
    8069             : 
    8070           0 :                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
    8071             : 
    8072           0 :                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
    8073             : 
    8074           0 :                         if (!dm_new_crtc_state->stream) {
    8075             :                                 /*
     8076             :                                  * This could happen because of issues with
     8077             :                                  * userspace notification delivery.
     8078             :                                  * In this case userspace tries to set a mode on
     8079             :                                  * a display which is in fact disconnected.
     8080             :                                  * dc_sink is NULL on the aconnector in this case.
     8081             :                                  * We expect a mode reset to come soon.
     8082             :                                  *
     8083             :                                  * This can also happen when an unplug is done
     8084             :                                  * during the resume sequence.
     8085             :                                  *
     8086             :                                  * In this case, we want to pretend we still
     8087             :                                  * have a sink to keep the pipe running so that
     8088             :                                  * hw state is consistent with the sw state.
    8089             :                                  */
    8090           0 :                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
    8091             :                                                 __func__, acrtc->base.base.id);
    8092           0 :                                 continue;
    8093             :                         }
    8094             : 
    8095           0 :                         if (dm_old_crtc_state->stream)
    8096           0 :                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
    8097             : 
    8098           0 :                         pm_runtime_get_noresume(dev->dev);
    8099             : 
    8100           0 :                         acrtc->enabled = true;
    8101           0 :                         acrtc->hw_mode = new_crtc_state->mode;
    8102           0 :                         crtc->hwmode = new_crtc_state->mode;
    8103           0 :                         mode_set_reset_required = true;
    8104           0 :                 } else if (modereset_required(new_crtc_state)) {
    8105           0 :                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
    8106             :                         /* i.e. reset mode */
    8107           0 :                         if (dm_old_crtc_state->stream)
    8108           0 :                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
    8109             : 
    8110             :                         mode_set_reset_required = true;
    8111             :                 }
    8112             :         } /* for_each_crtc_in_state() */
    8113             : 
    8114           0 :         if (dc_state) {
     8115             :                 /* If there was a mode set or reset, disable eDP PSR */
    8116           0 :                 if (mode_set_reset_required) {
    8117           0 :                         if (dm->vblank_control_workqueue)
    8118           0 :                                 flush_workqueue(dm->vblank_control_workqueue);
    8119             : 
    8120           0 :                         amdgpu_dm_psr_disable_all(dm);
    8121             :                 }
    8122             : 
    8123           0 :                 dm_enable_per_frame_crtc_master_sync(dc_state);
    8124           0 :                 mutex_lock(&dm->dc_lock);
    8125           0 :                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
    8126             : 
     8127             :                 /* Allow idle optimizations when the active vblank irq count is 0 (display off) */
    8128           0 :                 if (dm->active_vblank_irq_count == 0)
    8129           0 :                         dc_allow_idle_optimizations(dm->dc, true);
    8130           0 :                 mutex_unlock(&dm->dc_lock);
    8131             :         }
    8132             : 
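                      :         /* Record the OTG instance that backs each new stream on its amdgpu_crtc. */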
    8133           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
    8134           0 :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
    8135             : 
    8136           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8137             : 
    8138           0 :                 if (dm_new_crtc_state->stream != NULL) {
    8139           0 :                         const struct dc_stream_status *status =
    8140             :                                         dc_stream_get_status(dm_new_crtc_state->stream);
    8141             : 
    8142           0 :                         if (!status)
    8143           0 :                                 status = dc_stream_get_status_from_state(dc_state,
    8144             :                                                                          dm_new_crtc_state->stream);
    8145           0 :                         if (!status)
    8146           0 :                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
    8147             :                         else
    8148           0 :                                 acrtc->otg_inst = status->primary_otg_inst;
    8149             :                 }
    8150             :         }
    8151             : #ifdef CONFIG_DRM_AMD_DC_HDCP
    8152             :         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
    8153             :                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
    8154             :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
    8155             :                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    8156             : 
    8157             :                 new_crtc_state = NULL;
    8158             : 
    8159             :                 if (acrtc)
    8160             :                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
    8161             : 
    8162             :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8163             : 
    8164             :                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
    8165             :                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
    8166             :                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
    8167             :                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
    8168             :                         dm_new_con_state->update_hdcp = true;
    8169             :                         continue;
    8170             :                 }
    8171             : 
    8172             :                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
    8173             :                         hdcp_update_display(
    8174             :                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
    8175             :                                 new_con_state->hdcp_content_type,
    8176             :                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
    8177             :         }
    8178             : #endif
    8179             : 
    8180             :         /* Handle connector state changes */
    8181           0 :         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
    8182           0 :                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
    8183           0 :                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
    8184           0 :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
    8185             :                 struct dc_surface_update dummy_updates[MAX_SURFACES];
    8186             :                 struct dc_stream_update stream_update;
    8187             :                 struct dc_info_packet hdr_packet;
    8188           0 :                 struct dc_stream_status *status = NULL;
    8189             :                 bool abm_changed, hdr_changed, scaling_changed;
    8190             : 
    8191           0 :                 memset(&dummy_updates, 0, sizeof(dummy_updates));
    8192           0 :                 memset(&stream_update, 0, sizeof(stream_update));
    8193             : 
    8194           0 :                 if (acrtc) {
    8195           0 :                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
    8196           0 :                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
    8197             :                 }
    8198             : 
    8199             :                 /* Skip any modesets/resets */
    8200           0 :                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
    8201           0 :                         continue;
    8202             : 
    8203           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8204           0 :                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    8205             : 
    8206           0 :                 scaling_changed = is_scaling_state_different(dm_new_con_state,
    8207             :                                                              dm_old_con_state);
    8208             : 
    8209           0 :                 abm_changed = dm_new_crtc_state->abm_level !=
    8210           0 :                               dm_old_crtc_state->abm_level;
    8211             : 
    8212           0 :                 hdr_changed =
    8213           0 :                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
    8214             : 
    8215           0 :                 if (!scaling_changed && !abm_changed && !hdr_changed)
    8216           0 :                         continue;
    8217             : 
    8218           0 :                 stream_update.stream = dm_new_crtc_state->stream;
    8219           0 :                 if (scaling_changed) {
    8220           0 :                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
    8221             :                                         dm_new_con_state, dm_new_crtc_state->stream);
    8222             : 
    8223           0 :                         stream_update.src = dm_new_crtc_state->stream->src;
    8224           0 :                         stream_update.dst = dm_new_crtc_state->stream->dst;
    8225             :                 }
    8226             : 
    8227           0 :                 if (abm_changed) {
    8228           0 :                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
    8229             : 
    8230           0 :                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
    8231             :                 }
    8232             : 
    8233           0 :                 if (hdr_changed) {
    8234           0 :                         fill_hdr_info_packet(new_con_state, &hdr_packet);
    8235           0 :                         stream_update.hdr_static_metadata = &hdr_packet;
    8236             :                 }
    8237             : 
    8238           0 :                 status = dc_stream_get_status(dm_new_crtc_state->stream);
    8239             : 
    8240           0 :                 if (WARN_ON(!status))
    8241           0 :                         continue;
    8242             : 
    8243           0 :                 WARN_ON(!status->plane_count);
    8244             : 
    8245             :                 /*
    8246             :                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
    8247             :                  * Here we create an empty update on each plane.
    8248             :                  * To fix this, DC should permit updating only stream properties.
    8249             :                  */
    8250           0 :                 for (j = 0; j < status->plane_count; j++)
    8251           0 :                         dummy_updates[j].surface = status->plane_states[0];
    8252             : 
    8253             : 
    8254           0 :                 mutex_lock(&dm->dc_lock);
    8255           0 :                 dc_commit_updates_for_stream(dm->dc,
    8256             :                                                      dummy_updates,
    8257             :                                                      status->plane_count,
    8258             :                                                      dm_new_crtc_state->stream,
    8259             :                                                      &stream_update,
    8260             :                                                      dc_state);
    8261           0 :                 mutex_unlock(&dm->dc_lock);
    8262             :         }
    8263             : 
    8264             :         /* Count number of newly disabled CRTCs for dropping PM refs later. */
    8265           0 :         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
    8266             :                                       new_crtc_state, i) {
    8267           0 :                 if (old_crtc_state->active && !new_crtc_state->active)
    8268           0 :                         crtc_disable_count++;
    8269             : 
    8270           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8271           0 :                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    8272             : 
     8273             :                 /* Update the freesync config on the crtc state and the params used by the irq handler */
    8274           0 :                 update_stream_irq_parameters(dm, dm_new_crtc_state);
    8275             : 
    8276             :                 /* Handle vrr on->off / off->on transitions */
    8277           0 :                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
    8278             :                                                 dm_new_crtc_state);
    8279             :         }
    8280             : 
    8281             :         /**
    8282             :          * Enable interrupts for CRTCs that are newly enabled or went through
     8283             :          * a modeset. This is intentionally deferred until after the front-end
     8284             :          * state has been modified, so that the OTG is on and the IRQ
     8285             :          * handlers do not access stale or invalid state.
    8286             :          */
    8287           0 :         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
    8288           0 :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
    8289             : #ifdef CONFIG_DEBUG_FS
    8290             :                 bool configure_crc = false;
    8291             :                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
    8292             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    8293             :                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
    8294             : #endif
    8295             :                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    8296             :                 cur_crc_src = acrtc->dm_irq_params.crc_src;
    8297             :                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
    8298             : #endif
    8299           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8300             : 
    8301           0 :                 if (new_crtc_state->active &&
    8302           0 :                     (!old_crtc_state->active ||
    8303           0 :                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
    8304           0 :                         dc_stream_retain(dm_new_crtc_state->stream);
    8305           0 :                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
    8306           0 :                         manage_dm_interrupts(adev, acrtc, true);
    8307             : 
    8308             : #ifdef CONFIG_DEBUG_FS
    8309             :                         /**
    8310             :                          * Frontend may have changed so reapply the CRC capture
    8311             :                          * settings for the stream.
    8312             :                          */
    8313             :                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8314             : 
    8315             :                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
    8316             :                                 configure_crc = true;
    8317             : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
    8318             :                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
    8319             :                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    8320             :                                         acrtc->dm_irq_params.crc_window.update_win = true;
    8321             :                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
    8322             :                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
    8323             :                                         crc_rd_wrk->crtc = crtc;
    8324             :                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
    8325             :                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
    8326             :                                 }
    8327             : #endif
    8328             :                         }
    8329             : 
    8330             :                         if (configure_crc)
    8331             :                                 if (amdgpu_dm_crtc_configure_crc_source(
    8332             :                                         crtc, dm_new_crtc_state, cur_crc_src))
    8333             :                                         DRM_DEBUG_DRIVER("Failed to configure crc source");
    8334             : #endif
    8335             :                 }
    8336             :         }
    8337             : 
    8338           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
    8339           0 :                 if (new_crtc_state->async_flip)
    8340           0 :                         wait_for_vblank = false;
    8341             : 
    8342             :         /* Update planes when needed per CRTC */
    8343           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
    8344           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8345             : 
    8346           0 :                 if (dm_new_crtc_state->stream)
    8347           0 :                         amdgpu_dm_commit_planes(state, dc_state, dev,
    8348             :                                                 dm, crtc, wait_for_vblank);
    8349             :         }
    8350             : 
    8351             :         /* Update audio instances for each connector. */
    8352           0 :         amdgpu_dm_commit_audio(dev, state);
    8353             : 
    8354             :         /* restore the backlight level */
    8355           0 :         for (i = 0; i < dm->num_of_edps; i++) {
    8356           0 :                 if (dm->backlight_dev[i] &&
    8357           0 :                     (dm->actual_brightness[i] != dm->brightness[i]))
    8358           0 :                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
    8359             :         }
    8360             : 
    8361             :         /*
    8362             :          * Send a vblank event for every CRTC event not already handled in
    8363             :          * the flip path, and mark it consumed for drm_atomic_helper_commit_hw_done.
    8364             :          */
    8365           0 :         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    8366           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
    8367             : 
    8368           0 :                 if (new_crtc_state->event)
    8369           0 :                         drm_send_event_locked(dev, &new_crtc_state->event->base);
    8370             : 
    8371           0 :                 new_crtc_state->event = NULL;
    8372             :         }
    8373           0 :         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
    8374             : 
    8375             :         /* Signal HW programming completion */
    8376           0 :         drm_atomic_helper_commit_hw_done(state);
    8377             : 
    8378           0 :         if (wait_for_vblank)
    8379           0 :                 drm_atomic_helper_wait_for_flip_done(dev, state);
    8380             : 
    8381           0 :         drm_atomic_helper_cleanup_planes(dev, state);
    8382             : 
    8383             :         /* Return the stolen VGA memory to VRAM */
    8384           0 :         if (!adev->mman.keep_stolen_vga_memory)
    8385           0 :                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
    8386           0 :         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
    8387             : 
    8388             :         /*
    8389             :          * Finally, drop a runtime PM reference for each newly disabled CRTC,
    8390             :          * so we can put the GPU into runtime suspend if we're not driving any
    8391             :          * displays anymore
    8392             :          */
    8393           0 :         for (i = 0; i < crtc_disable_count; i++)
    8394           0 :                 pm_runtime_put_autosuspend(dev->dev);
    8395           0 :         pm_runtime_mark_last_busy(dev->dev);
    8396             : 
    8397           0 :         if (dc_state_temp)
    8398           0 :                 dc_release_state(dc_state_temp);
    8399           0 : }
    8400             : 
    8401             : 
    8402           0 : static int dm_force_atomic_commit(struct drm_connector *connector)
    8403             : {
    8404           0 :         int ret = 0;
    8405           0 :         struct drm_device *ddev = connector->dev;
    8406           0 :         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
    8407           0 :         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
    8408           0 :         struct drm_plane *plane = disconnected_acrtc->base.primary;
    8409             :         struct drm_connector_state *conn_state;
    8410             :         struct drm_crtc_state *crtc_state;
    8411             :         struct drm_plane_state *plane_state;
    8412             : 
    8413           0 :         if (!state)
    8414             :                 return -ENOMEM;
    8415             : 
    8416           0 :         state->acquire_ctx = ddev->mode_config.acquire_ctx;
    8417             : 
    8418             :         /* Construct an atomic state to restore previous display setting */
    8419             : 
    8420             :         /*
    8421             :          * Attach the connector to drm_atomic_state
    8422             :          */
    8423           0 :         conn_state = drm_atomic_get_connector_state(state, connector);
    8424             : 
    8425           0 :         ret = PTR_ERR_OR_ZERO(conn_state);
    8426           0 :         if (ret)
    8427             :                 goto out;
    8428             : 
    8429             :         /* Attach the CRTC to drm_atomic_state */
    8430           0 :         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
    8431             : 
    8432           0 :         ret = PTR_ERR_OR_ZERO(crtc_state);
    8433           0 :         if (ret)
    8434             :                 goto out;
    8435             : 
    8436             :         /* force a restore */
    8437           0 :         crtc_state->mode_changed = true;
    8438             : 
    8439             :         /* Attach plane to drm_atomic_state */
    8440           0 :         plane_state = drm_atomic_get_plane_state(state, plane);
    8441             : 
    8442           0 :         ret = PTR_ERR_OR_ZERO(plane_state);
    8443           0 :         if (ret)
    8444             :                 goto out;
    8445             : 
    8446             :         /* Call commit internally with the state we just constructed */
    8447           0 :         ret = drm_atomic_commit(state);
    8448             : 
    8449             : out:
    8450           0 :         drm_atomic_state_put(state);
    8451           0 :         if (ret)
    8452           0 :                 DRM_ERROR("Restoring old state failed with %i\n", ret);
    8453             : 
    8454             :         return ret;
    8455             : }
    8456             : 
    8457             : /*
    8458             :  * This function handles all cases when set mode does not come upon hotplug.
    8459             :  * This includes when a display is unplugged then plugged back into the
    8460             :  * same port and when running without usermode desktop manager support.
    8461             :  */
    8462           0 : void dm_restore_drm_connector_state(struct drm_device *dev,
    8463             :                                     struct drm_connector *connector)
    8464             : {
    8465           0 :         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    8466             :         struct amdgpu_crtc *disconnected_acrtc;
    8467             :         struct dm_crtc_state *acrtc_state;
    8468             : 
    8469           0 :         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
    8470             :                 return;
    8471             : 
    8472           0 :         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
    8473           0 :         if (!disconnected_acrtc)
    8474             :                 return;
    8475             : 
    8476           0 :         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
    8477           0 :         if (!acrtc_state->stream)
    8478             :                 return;
    8479             : 
    8480             :         /*
    8481             :          * If the previous sink has not been released and differs from the
    8482             :          * current one, we deduce that we cannot rely on a usermode call to
    8483             :          * turn on the display, so we do it here.
    8484             :          */
    8485           0 :         if (acrtc_state->stream->sink != aconnector->dc_sink)
    8486           0 :                 dm_force_atomic_commit(&aconnector->base);
    8487             : }
    8488             : 
    8489             : /*
    8490             :  * Grabs all modesetting locks to serialize against any blocking commits,
    8491             :  * and waits for completion of all non-blocking commits.
    8492             :  */
    8493           0 : static int do_aquire_global_lock(struct drm_device *dev,
    8494             :                                  struct drm_atomic_state *state)
    8495             : {
    8496             :         struct drm_crtc *crtc;
    8497             :         struct drm_crtc_commit *commit;
    8498             :         long ret;
    8499             : 
    8500             :         /*
    8501             :          * Adding all modeset locks to acquire_ctx ensures that when the
    8502             :          * framework releases it, the extra locks we are taking here will
    8503             :          * get released too.
    8504             :          */
    8505           0 :         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
    8506           0 :         if (ret)
    8507             :                 return ret;
    8508             : 
    8509           0 :         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
    8510           0 :                 spin_lock(&crtc->commit_lock);
    8511           0 :                 commit = list_first_entry_or_null(&crtc->commit_list,
    8512             :                                 struct drm_crtc_commit, commit_entry);
    8513           0 :                 if (commit)
    8514             :                         drm_crtc_commit_get(commit);
    8515           0 :                 spin_unlock(&crtc->commit_lock);
    8516             : 
    8517           0 :                 if (!commit)
    8518           0 :                         continue;
    8519             : 
    8520             :                 /*
    8521             :                  * Make sure all pending HW programming has completed and
    8522             :                  * all page flips are done.
    8523             :                  */
    8524           0 :                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
    8525             : 
    8526           0 :                 if (ret > 0)
    8527           0 :                         ret = wait_for_completion_interruptible_timeout(
    8528             :                                         &commit->flip_done, 10*HZ);
    8529             : 
    8530           0 :                 if (ret == 0)
    8531           0 :                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
    8532             :                                   "timed out\n", crtc->base.id, crtc->name);
    8533             : 
    8534             :                 drm_crtc_commit_put(commit);
    8535             :         }
    8536             : 
    8537           0 :         return ret < 0 ? ret : 0;
    8538             : }
    8539             : 
    8540           0 : static void get_freesync_config_for_crtc(
    8541             :         struct dm_crtc_state *new_crtc_state,
    8542             :         struct dm_connector_state *new_con_state)
    8543             : {
    8544           0 :         struct mod_freesync_config config = {0};
    8545           0 :         struct amdgpu_dm_connector *aconnector =
    8546           0 :                         to_amdgpu_dm_connector(new_con_state->base.connector);
    8547           0 :         struct drm_display_mode *mode = &new_crtc_state->base.mode;
    8548           0 :         int vrefresh = drm_mode_vrefresh(mode);
    8549           0 :         bool fs_vid_mode = false;
    8550             : 
    8551           0 :         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
    8552           0 :                                         vrefresh >= aconnector->min_vfreq &&
    8553           0 :                                         vrefresh <= aconnector->max_vfreq;
    8554             : 
    8555           0 :         if (new_crtc_state->vrr_supported) {
    8556           0 :                 new_crtc_state->stream->ignore_msa_timing_param = true;
    8557           0 :                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
    8558             : 
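                     :                 /* min_vfreq/max_vfreq are in Hz; the freesync config expects uHz. */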
    8559           0 :                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
    8560           0 :                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
    8561           0 :                 config.vsif_supported = true;
    8562           0 :                 config.btr = true;
    8563             : 
    8564           0 :                 if (fs_vid_mode) {
    8565           0 :                         config.state = VRR_STATE_ACTIVE_FIXED;
    8566           0 :                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
    8567             :                         goto out;
    8568           0 :                 } else if (new_crtc_state->base.vrr_enabled) {
    8569             :                         config.state = VRR_STATE_ACTIVE_VARIABLE;
    8570             :                 } else {
    8571           0 :                         config.state = VRR_STATE_INACTIVE;
    8572             :                 }
    8573             :         }
    8574             : out:
    8575           0 :         new_crtc_state->freesync_config = config;
    8576           0 : }
    8577             : 
    8578             : static void reset_freesync_config_for_crtc(
    8579             :         struct dm_crtc_state *new_crtc_state)
    8580             : {
    8581           0 :         new_crtc_state->vrr_supported = false;
    8582             : 
    8583           0 :         memset(&new_crtc_state->vrr_infopacket, 0,
    8584             :                sizeof(new_crtc_state->vrr_infopacket));
    8585             : }
    8586             : 
    8587             : static bool
    8588           0 : is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
    8589             :                                  struct drm_crtc_state *new_crtc_state)
    8590             : {
    8591             :         const struct drm_display_mode *old_mode, *new_mode;
    8592             : 
    8593           0 :         if (!old_crtc_state || !new_crtc_state)
    8594             :                 return false;
    8595             : 
    8596           0 :         old_mode = &old_crtc_state->mode;
    8597           0 :         new_mode = &new_crtc_state->mode;
    8598             : 
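                     :         /*
                     :          * The timing is considered unchanged for freesync only when the pixel
                     :          * clock, all horizontal timing, the vertical active size and the vsync
                     :          * pulse width are identical while the vertical blanking (vtotal,
                     :          * vsync_start, vsync_end) has changed, which is typically a front
                     :          * porch adjustment used for freesync video modes.
                     :          */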
    8599           0 :         if (old_mode->clock       == new_mode->clock &&
    8600             :             old_mode->hdisplay    == new_mode->hdisplay &&
    8601           0 :             old_mode->vdisplay    == new_mode->vdisplay &&
    8602           0 :             old_mode->htotal      == new_mode->htotal &&
    8603           0 :             old_mode->vtotal      != new_mode->vtotal &&
    8604           0 :             old_mode->hsync_start == new_mode->hsync_start &&
    8605           0 :             old_mode->vsync_start != new_mode->vsync_start &&
    8606           0 :             old_mode->hsync_end   == new_mode->hsync_end &&
    8607           0 :             old_mode->vsync_end   != new_mode->vsync_end &&
    8608           0 :             old_mode->hskew       == new_mode->hskew &&
    8609           0 :             old_mode->vscan       == new_mode->vscan &&
    8610           0 :             (old_mode->vsync_end - old_mode->vsync_start) ==
    8611           0 :             (new_mode->vsync_end - new_mode->vsync_start))
    8612             :                 return true;
    8613             : 
    8614           0 :         return false;
    8615             : }
    8616             : 
    8617             : static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
    8618             :         uint64_t num, den, res;
    8619           0 :         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
    8620             : 
    8621           0 :         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
    8622             : 
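                     :         /*
                     :          * mode.clock is the pixel clock in kHz, so clock * 1000 is pixels per
                     :          * second; multiplying by 1000000 and dividing by htotal * vtotal
                     :          * (pixels per frame) yields the refresh rate in uHz, the unit used by
                     :          * freesync_config.fixed_refresh_in_uhz.
                     :          */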
    8623           0 :         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
    8624           0 :         den = (unsigned long long)new_crtc_state->mode.htotal *
    8625           0 :               (unsigned long long)new_crtc_state->mode.vtotal;
    8626             : 
    8627           0 :         res = div_u64(num, den);
    8628           0 :         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
    8629             : }
    8630             : 
    8631           0 : static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
    8632             :                          struct drm_atomic_state *state,
    8633             :                          struct drm_crtc *crtc,
    8634             :                          struct drm_crtc_state *old_crtc_state,
    8635             :                          struct drm_crtc_state *new_crtc_state,
    8636             :                          bool enable,
    8637             :                          bool *lock_and_validation_needed)
    8638             : {
    8639           0 :         struct dm_atomic_state *dm_state = NULL;
    8640             :         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
    8641             :         struct dc_stream_state *new_stream;
    8642           0 :         int ret = 0;
    8643             : 
    8644             :         /*
    8645             :          * TODO: Move this code into dm_crtc_atomic_check and update the
    8646             :          * changed items there once we get rid of dc_validation_set.
    8647             :          */
    8648           0 :         struct amdgpu_crtc *acrtc = NULL;
    8649           0 :         struct amdgpu_dm_connector *aconnector = NULL;
    8650           0 :         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
    8651           0 :         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
    8652             : 
    8653           0 :         new_stream = NULL;
    8654             : 
    8655           0 :         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    8656           0 :         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    8657           0 :         acrtc = to_amdgpu_crtc(crtc);
    8658           0 :         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
    8659             : 
    8660             :         /* TODO This hack should go away */
    8661           0 :         if (aconnector && enable) {
    8662             :                 /* Make sure fake sink is created in plug-in scenario */
    8663           0 :                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
    8664             :                                                             &aconnector->base);
    8665           0 :                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
    8666             :                                                             &aconnector->base);
    8667             : 
    8668           0 :                 if (IS_ERR(drm_new_conn_state)) {
    8669           0 :                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
    8670           0 :                         goto fail;
    8671             :                 }
    8672             : 
    8673           0 :                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
    8674           0 :                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
    8675             : 
    8676           0 :                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
    8677             :                         goto skip_modeset;
    8678             : 
    8679           0 :                 new_stream = create_validate_stream_for_sink(aconnector,
    8680           0 :                                                              &new_crtc_state->mode,
    8681             :                                                              dm_new_conn_state,
    8682           0 :                                                              dm_old_crtc_state->stream);
    8683             : 
    8684             :                 /*
    8685             :                  * We can have no stream on ACTION_SET if a display was
    8686             :                  * disconnected during S3; in this case it is not an error,
    8687             :                  * the OS will be updated after detection and will do the
    8688             :                  * right thing on the next atomic commit.
    8689             :                  */
    8690             : 
    8691           0 :                 if (!new_stream) {
    8692           0 :                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
    8693             :                                         __func__, acrtc->base.base.id);
    8694           0 :                         ret = -ENOMEM;
    8695           0 :                         goto fail;
    8696             :                 }
    8697             : 
    8698             :                 /*
    8699             :                  * TODO: Check VSDB bits to decide whether this should
    8700             :                  * be enabled or not.
    8701             :                  */
    8702           0 :                 new_stream->triggered_crtc_reset.enabled =
    8703           0 :                         dm->force_timing_sync;
    8704             : 
    8705           0 :                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
    8706             : 
    8707           0 :                 ret = fill_hdr_info_packet(drm_new_conn_state,
    8708             :                                            &new_stream->hdr_static_metadata);
    8709           0 :                 if (ret)
    8710             :                         goto fail;
    8711             : 
    8712             :                 /*
    8713             :                  * If we already removed the old stream from the context
    8714             :                  * (and set the new stream to NULL) then we can't reuse
    8715             :                  * the old stream even if the stream and scaling are unchanged.
    8716             :                  * We would hit the BUG_ON and get a black screen.
    8717             :                  *
    8718             :                  * TODO: Refactor this function to allow this check to work
    8719             :                  * in all conditions.
    8720             :                  */
    8721           0 :                 if (dm_new_crtc_state->stream &&
    8722           0 :                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
    8723             :                         goto skip_modeset;
    8724             : 
    8725           0 :                 if (dm_new_crtc_state->stream &&
    8726           0 :                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
    8727           0 :                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
    8728           0 :                         new_crtc_state->mode_changed = false;
    8729           0 :                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
    8730             :                                          new_crtc_state->mode_changed);
    8731             :                 }
    8732             :         }
    8733             : 
    8734             :         /* mode_changed flag may get updated above, need to check again */
    8735           0 :         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
    8736             :                 goto skip_modeset;
    8737             : 
    8738           0 :         drm_dbg_state(state->dev,
    8739             :                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
    8740             :                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
    8741             :                 "connectors_changed:%d\n",
    8742             :                 acrtc->crtc_id,
    8743             :                 new_crtc_state->enable,
    8744             :                 new_crtc_state->active,
    8745             :                 new_crtc_state->planes_changed,
    8746             :                 new_crtc_state->mode_changed,
    8747             :                 new_crtc_state->active_changed,
    8748             :                 new_crtc_state->connectors_changed);
    8749             : 
    8750             :         /* Remove stream for any changed/disabled CRTC */
    8751           0 :         if (!enable) {
    8752             : 
    8753           0 :                 if (!dm_old_crtc_state->stream)
    8754             :                         goto skip_modeset;
    8755             : 
    8756           0 :                 if (dm_new_crtc_state->stream &&
    8757           0 :                     is_timing_unchanged_for_freesync(new_crtc_state,
    8758             :                                                      old_crtc_state)) {
    8759           0 :                         new_crtc_state->mode_changed = false;
    8760           0 :                         DRM_DEBUG_DRIVER(
    8761             :                                 "Mode change not required for front porch change, "
    8762             :                                 "setting mode_changed to %d",
    8763             :                                 new_crtc_state->mode_changed);
    8764             : 
    8765             :                         set_freesync_fixed_config(dm_new_crtc_state);
    8766             : 
    8767             :                         goto skip_modeset;
    8768           0 :                 } else if (aconnector &&
    8769           0 :                            is_freesync_video_mode(&new_crtc_state->mode,
    8770             :                                                   aconnector)) {
    8771             :                         struct drm_display_mode *high_mode;
    8772             : 
    8773           0 :                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
    8774           0 :                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
    8775             :                                 set_freesync_fixed_config(dm_new_crtc_state);
    8776             :                         }
    8777             :                 }
    8778             : 
    8779           0 :                 ret = dm_atomic_get_state(state, &dm_state);
    8780           0 :                 if (ret)
    8781             :                         goto fail;
    8782             : 
    8783           0 :                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
    8784             :                                 crtc->base.id);
    8785             : 
    8786             :                 /* i.e. reset mode */
    8787           0 :                 if (dc_remove_stream_from_ctx(
    8788             :                                 dm->dc,
    8789             :                                 dm_state->context,
    8790             :                                 dm_old_crtc_state->stream) != DC_OK) {
    8791             :                         ret = -EINVAL;
    8792             :                         goto fail;
    8793             :                 }
    8794             : 
    8795           0 :                 dc_stream_release(dm_old_crtc_state->stream);
    8796           0 :                 dm_new_crtc_state->stream = NULL;
    8797             : 
    8798           0 :                 reset_freesync_config_for_crtc(dm_new_crtc_state);
    8799             : 
    8800           0 :                 *lock_and_validation_needed = true;
    8801             : 
    8802             :         } else {/* Add stream for any updated/enabled CRTC */
    8803             :                 /*
    8804             :                  * Quick fix to prevent a NULL pointer dereference on new_stream
    8805             :                  * when newly added MST connectors are not found in the existing
    8806             :                  * crtc_state in chained mode. TODO: dig out the root cause of this.
    8807             :                  */
    8808           0 :                 if (!aconnector)
    8809             :                         goto skip_modeset;
    8810             : 
    8811           0 :                 if (modereset_required(new_crtc_state))
    8812             :                         goto skip_modeset;
    8813             : 
    8814           0 :                 if (modeset_required(new_crtc_state, new_stream,
    8815             :                                      dm_old_crtc_state->stream)) {
    8816             : 
    8817           0 :                         WARN_ON(dm_new_crtc_state->stream);
    8818             : 
    8819           0 :                         ret = dm_atomic_get_state(state, &dm_state);
    8820           0 :                         if (ret)
    8821             :                                 goto fail;
    8822             : 
    8823           0 :                         dm_new_crtc_state->stream = new_stream;
    8824             : 
    8825           0 :                         dc_stream_retain(new_stream);
    8826             : 
    8827           0 :                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
    8828             :                                          crtc->base.id);
    8829             : 
    8830           0 :                         if (dc_add_stream_to_ctx(
    8831             :                                         dm->dc,
    8832             :                                         dm_state->context,
    8833             :                                         dm_new_crtc_state->stream) != DC_OK) {
    8834             :                                 ret = -EINVAL;
    8835             :                                 goto fail;
    8836             :                         }
    8837             : 
    8838           0 :                         *lock_and_validation_needed = true;
    8839             :                 }
    8840             :         }
    8841             : 
    8842             : skip_modeset:
    8843             :         /* Release extra reference */
    8844           0 :         if (new_stream)
    8845           0 :                  dc_stream_release(new_stream);
    8846             : 
    8847             :         /*
    8848             :          * Below we only want to apply dc stream updates that do not
    8849             :          * require a full modeset.
    8850             :          */
    8851           0 :         if (!(enable && aconnector && new_crtc_state->active))
    8852             :                 return 0;
    8853             :         /*
    8854             :          * Given the above conditions, the dc state cannot be NULL because:
    8855             :          * 1. We're in the process of enabling CRTCs (the stream has just been
    8856             :          *    added to the dc context, or is already in the context),
    8857             :          * 2. the CRTC has a valid connector attached, and
    8858             :          * 3. the CRTC is currently active and enabled.
    8859             :          * => The dc stream state currently exists.
    8860             :          */
    8861           0 :         BUG_ON(dm_new_crtc_state->stream == NULL);
    8862             : 
    8863             :         /* Scaling or underscan settings */
    8864           0 :         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
    8865           0 :                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
    8866           0 :                 update_stream_scaling_settings(
    8867           0 :                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
    8868             : 
    8869             :         /* ABM settings */
    8870           0 :         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
    8871             : 
    8872             :         /*
    8873             :          * Color management settings. We also update color properties
    8874             :          * when a modeset is needed, to ensure it gets reprogrammed.
    8875             :          */
    8876           0 :         if (dm_new_crtc_state->base.color_mgmt_changed ||
    8877           0 :             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
    8878           0 :                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
    8879           0 :                 if (ret)
    8880             :                         goto fail;
    8881             :         }
    8882             : 
    8883             :         /* Update Freesync settings. */
    8884           0 :         get_freesync_config_for_crtc(dm_new_crtc_state,
    8885             :                                      dm_new_conn_state);
    8886             : 
    8887           0 :         return ret;
    8888             : 
    8889             : fail:
    8890           0 :         if (new_stream)
    8891           0 :                 dc_stream_release(new_stream);
    8892             :         return ret;
    8893             : }
    8894             : 
    8895           0 : static bool should_reset_plane(struct drm_atomic_state *state,
    8896             :                                struct drm_plane *plane,
    8897             :                                struct drm_plane_state *old_plane_state,
    8898             :                                struct drm_plane_state *new_plane_state)
    8899             : {
    8900             :         struct drm_plane *other;
    8901             :         struct drm_plane_state *old_other_state, *new_other_state;
    8902             :         struct drm_crtc_state *new_crtc_state;
    8903             :         int i;
    8904             : 
    8905             :         /*
    8906             :          * TODO: Remove this hack once the checks below are sufficient
    8907             :          * to determine when we need to reset all the planes on
    8908             :          * the stream.
    8909             :          */
    8910           0 :         if (state->allow_modeset)
    8911             :                 return true;
    8912             : 
    8913             :         /* Exit early if we know that we're adding or removing the plane. */
    8914           0 :         if (old_plane_state->crtc != new_plane_state->crtc)
    8915             :                 return true;
    8916             : 
    8917             :         /* old crtc == new_crtc == NULL, plane not in context. */
    8918           0 :         if (!new_plane_state->crtc)
    8919             :                 return false;
    8920             : 
    8921           0 :         new_crtc_state =
    8922             :                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
    8923             : 
    8924           0 :         if (!new_crtc_state)
    8925             :                 return true;
    8926             : 
    8927             :         /* CRTC Degamma changes currently require us to recreate planes. */
    8928           0 :         if (new_crtc_state->color_mgmt_changed)
    8929             :                 return true;
    8930             : 
    8931           0 :         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
    8932             :                 return true;
    8933             : 
    8934             :         /*
    8935             :          * If there are any new primary or overlay planes being added or
    8936             :          * removed then the z-order can potentially change. To ensure
    8937             :          * correct z-order and pipe acquisition the current DC architecture
    8938             :          * requires us to remove and recreate all existing planes.
    8939             :          *
    8940             :          * TODO: Come up with a more elegant solution for this.
    8941             :          */
    8942           0 :         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
    8943             :                 struct amdgpu_framebuffer *old_afb, *new_afb;
    8944           0 :                 if (other->type == DRM_PLANE_TYPE_CURSOR)
    8945           0 :                         continue;
    8946             : 
    8947           0 :                 if (old_other_state->crtc != new_plane_state->crtc &&
    8948           0 :                     new_other_state->crtc != new_plane_state->crtc)
    8949           0 :                         continue;
    8950             : 
    8951           0 :                 if (old_other_state->crtc != new_other_state->crtc)
    8952             :                         return true;
    8953             : 
    8954             :                 /* Src/dst size and scaling updates. */
    8955           0 :                 if (old_other_state->src_w != new_other_state->src_w ||
    8956             :                     old_other_state->src_h != new_other_state->src_h ||
    8957           0 :                     old_other_state->crtc_w != new_other_state->crtc_w ||
    8958             :                     old_other_state->crtc_h != new_other_state->crtc_h)
    8959             :                         return true;
    8960             : 
    8961             :                 /* Rotation / mirroring updates. */
    8962           0 :                 if (old_other_state->rotation != new_other_state->rotation)
    8963             :                         return true;
    8964             : 
    8965             :                 /* Blending updates. */
    8966           0 :                 if (old_other_state->pixel_blend_mode !=
    8967           0 :                     new_other_state->pixel_blend_mode)
    8968             :                         return true;
    8969             : 
    8970             :                 /* Alpha updates. */
    8971           0 :                 if (old_other_state->alpha != new_other_state->alpha)
    8972             :                         return true;
    8973             : 
    8974             :                 /* Colorspace changes. */
    8975           0 :                 if (old_other_state->color_range != new_other_state->color_range ||
    8976             :                     old_other_state->color_encoding != new_other_state->color_encoding)
    8977             :                         return true;
    8978             : 
    8979             :                 /* Framebuffer checks fall at the end. */
    8980           0 :                 if (!old_other_state->fb || !new_other_state->fb)
    8981           0 :                         continue;
    8982             : 
    8983             :                 /* Pixel format changes can require bandwidth updates. */
    8984           0 :                 if (old_other_state->fb->format != new_other_state->fb->format)
    8985             :                         return true;
    8986             : 
    8987           0 :                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
    8988           0 :                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
    8989             : 
    8990             :                 /* Tiling and DCC changes also require bandwidth updates. */
    8991           0 :                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
    8992           0 :                     old_afb->base.modifier != new_afb->base.modifier)
    8993             :                         return true;
    8994             :         }
    8995             : 
    8996             :         return false;
    8997             : }
    8998             : 
    8999           0 : static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
    9000             :                               struct drm_plane_state *new_plane_state,
    9001             :                               struct drm_framebuffer *fb)
    9002             : {
    9003           0 :         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
    9004           0 :         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
    9005             :         unsigned int pitch;
    9006             :         bool linear;
    9007             : 
    9008           0 :         if (fb->width > new_acrtc->max_cursor_width ||
    9009           0 :             fb->height > new_acrtc->max_cursor_height) {
    9010           0 :                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
    9011             :                                  new_plane_state->fb->width,
    9012             :                                  new_plane_state->fb->height);
    9013           0 :                 return -EINVAL;
    9014             :         }
    9015           0 :         if (new_plane_state->src_w != fb->width << 16 ||
    9016           0 :             new_plane_state->src_h != fb->height << 16) {
    9017           0 :                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
    9018           0 :                 return -EINVAL;
    9019             :         }
    9020             : 
    9021             :         /* Pitch in pixels */
    9022           0 :         pitch = fb->pitches[0] / fb->format->cpp[0];
    9023             : 
    9024           0 :         if (fb->width != pitch) {
    9025           0 :                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
    9026             :                                  fb->width, pitch);
    9027           0 :                 return -EINVAL;
    9028             :         }
    9029             : 
    9030           0 :         switch (pitch) {
    9031             :         case 64:
    9032             :         case 128:
    9033             :         case 256:
    9034             :                 /* FB pitch is supported by cursor plane */
    9035             :                 break;
    9036             :         default:
    9037           0 :                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
    9038           0 :                 return -EINVAL;
    9039             :         }
    9040             : 
    9041             :         /* Core DRM takes care of checking FB modifiers, so we only need to
    9042             :          * check tiling flags when the FB doesn't have a modifier. */
    9043           0 :         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
    9044           0 :                 if (adev->family < AMDGPU_FAMILY_AI) {
    9045           0 :                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
    9046           0 :                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
    9047           0 :                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
    9048             :                 } else {
    9049           0 :                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
    9050             :                 }
    9051           0 :                 if (!linear) {
    9052           0 :                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
    9053           0 :                         return -EINVAL;
    9054             :                 }
    9055             :         }
    9056             : 
    9057             :         return 0;
    9058             : }
    9059             : 
    9060           0 : static int dm_update_plane_state(struct dc *dc,
    9061             :                                  struct drm_atomic_state *state,
    9062             :                                  struct drm_plane *plane,
    9063             :                                  struct drm_plane_state *old_plane_state,
    9064             :                                  struct drm_plane_state *new_plane_state,
    9065             :                                  bool enable,
    9066             :                                  bool *lock_and_validation_needed)
    9067             : {
    9068             : 
    9069           0 :         struct dm_atomic_state *dm_state = NULL;
    9070             :         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
    9071             :         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
    9072             :         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
    9073             :         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
    9074             :         struct amdgpu_crtc *new_acrtc;
    9075             :         bool needs_reset;
    9076           0 :         int ret = 0;
    9077             : 
    9078             : 
    9079           0 :         new_plane_crtc = new_plane_state->crtc;
    9080           0 :         old_plane_crtc = old_plane_state->crtc;
    9081           0 :         dm_new_plane_state = to_dm_plane_state(new_plane_state);
    9082           0 :         dm_old_plane_state = to_dm_plane_state(old_plane_state);
    9083             : 
    9084           0 :         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
    9085           0 :                 if (!enable || !new_plane_crtc ||
    9086           0 :                         drm_atomic_plane_disabling(plane->state, new_plane_state))
    9087             :                         return 0;
    9088             : 
    9089           0 :                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
    9090             : 
    9091           0 :                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
    9092           0 :                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
    9093           0 :                         return -EINVAL;
    9094             :                 }
    9095             : 
    9096           0 :                 if (new_plane_state->fb) {
    9097           0 :                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
    9098             :                                                  new_plane_state->fb);
    9099           0 :                         if (ret)
    9100             :                                 return ret;
    9101             :                 }
    9102             : 
    9103             :                 return 0;
    9104             :         }
    9105             : 
    9106           0 :         needs_reset = should_reset_plane(state, plane, old_plane_state,
    9107             :                                          new_plane_state);
    9108             : 
    9109             :         /* Remove any changed/removed planes */
    9110           0 :         if (!enable) {
    9111           0 :                 if (!needs_reset)
    9112             :                         return 0;
    9113             : 
    9114           0 :                 if (!old_plane_crtc)
    9115             :                         return 0;
    9116             : 
    9117           0 :                 old_crtc_state = drm_atomic_get_old_crtc_state(
    9118             :                                 state, old_plane_crtc);
    9119           0 :                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    9120             : 
    9121           0 :                 if (!dm_old_crtc_state->stream)
    9122             :                         return 0;
    9123             : 
    9124           0 :                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
    9125             :                                 plane->base.id, old_plane_crtc->base.id);
    9126             : 
    9127           0 :                 ret = dm_atomic_get_state(state, &dm_state);
    9128           0 :                 if (ret)
    9129             :                         return ret;
    9130             : 
    9131           0 :                 if (!dc_remove_plane_from_context(
    9132             :                                 dc,
    9133             :                                 dm_old_crtc_state->stream,
    9134             :                                 dm_old_plane_state->dc_state,
    9135             :                                 dm_state->context)) {
    9136             : 
    9137             :                         return -EINVAL;
    9138             :                 }
    9139             : 
    9140             : 
    9141           0 :                 dc_plane_state_release(dm_old_plane_state->dc_state);
    9142           0 :                 dm_new_plane_state->dc_state = NULL;
    9143             : 
    9144           0 :                 *lock_and_validation_needed = true;
    9145             : 
    9146             :         } else { /* Add new planes */
    9147             :                 struct dc_plane_state *dc_new_plane_state;
    9148             : 
    9149           0 :                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
    9150             :                         return 0;
    9151             : 
    9152           0 :                 if (!new_plane_crtc)
    9153             :                         return 0;
    9154             : 
    9155           0 :                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
    9156           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    9157             : 
    9158           0 :                 if (!dm_new_crtc_state->stream)
    9159             :                         return 0;
    9160             : 
    9161           0 :                 if (!needs_reset)
    9162             :                         return 0;
    9163             : 
    9164           0 :                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
    9165           0 :                 if (ret)
    9166             :                         return ret;
    9167             : 
    9168           0 :                 WARN_ON(dm_new_plane_state->dc_state);
    9169             : 
    9170           0 :                 dc_new_plane_state = dc_create_plane_state(dc);
    9171           0 :                 if (!dc_new_plane_state)
    9172             :                         return -ENOMEM;
    9173             : 
    9174           0 :                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
    9175             :                                  plane->base.id, new_plane_crtc->base.id);
    9176             : 
    9177           0 :                 ret = fill_dc_plane_attributes(
    9178             :                         drm_to_adev(new_plane_crtc->dev),
    9179             :                         dc_new_plane_state,
    9180             :                         new_plane_state,
    9181             :                         new_crtc_state);
    9182           0 :                 if (ret) {
    9183           0 :                         dc_plane_state_release(dc_new_plane_state);
    9184           0 :                         return ret;
    9185             :                 }
    9186             : 
    9187           0 :                 ret = dm_atomic_get_state(state, &dm_state);
    9188           0 :                 if (ret) {
    9189           0 :                         dc_plane_state_release(dc_new_plane_state);
    9190           0 :                         return ret;
    9191             :                 }
    9192             : 
    9193             :                 /*
    9194             :                  * Any atomic check errors that occur after this will
    9195             :                  * not need a release. The plane state will be attached
    9196             :                  * to the stream, and therefore part of the atomic
    9197             :                  * state. It'll be released when the atomic state is
    9198             :                  * cleaned.
    9199             :                  */
    9200           0 :                 if (!dc_add_plane_to_context(
    9201             :                                 dc,
    9202             :                                 dm_new_crtc_state->stream,
    9203             :                                 dc_new_plane_state,
    9204             :                                 dm_state->context)) {
    9205             : 
    9206           0 :                         dc_plane_state_release(dc_new_plane_state);
    9207           0 :                         return -EINVAL;
    9208             :                 }
    9209             : 
    9210           0 :                 dm_new_plane_state->dc_state = dc_new_plane_state;
    9211             : 
    9212           0 :                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
    9213             : 
    9214             :                 /* Tell DC to do a full surface update every time there
    9215             :                  * is a plane change. Inefficient, but works for now.
    9216             :                  */
    9217           0 :                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
    9218             : 
    9219           0 :                 *lock_and_validation_needed = true;
    9220             :         }
    9221             : 
    9222             : 
    9223             :         return ret;
    9224             : }
    9225             : 
    9226             : static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
    9227             :                                        int *src_w, int *src_h)
    9228             : {
    9229           0 :         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
    9230             :         case DRM_MODE_ROTATE_90:
    9231             :         case DRM_MODE_ROTATE_270:
    9232           0 :                 *src_w = plane_state->src_h >> 16;
    9233           0 :                 *src_h = plane_state->src_w >> 16;
    9234             :                 break;
    9235             :         case DRM_MODE_ROTATE_0:
    9236             :         case DRM_MODE_ROTATE_180:
    9237             :         default:
    9238           0 :                 *src_w = plane_state->src_w >> 16;
    9239           0 :                 *src_h = plane_state->src_h >> 16;
    9240             :                 break;
    9241             :         }
    9242             : }
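
/*
 * Illustrative sketch, not part of the driver: drm_plane_state.src_w/.src_h
 * are 16.16 fixed-point values, hence the ">> 16" above, and a 90/270 degree
 * rotation swaps the oriented width and height. The helper below exists only
 * to demonstrate the expected results under that assumption.
 */
#if 0
static void example_oriented_size(struct drm_plane_state *ps)
{
        int w, h;

        ps->rotation = DRM_MODE_ROTATE_90;
        ps->src_w = 1920 << 16;
        ps->src_h = 1080 << 16;
        dm_get_oriented_plane_size(ps, &w, &h);   /* w == 1080, h == 1920 */

        ps->rotation = DRM_MODE_ROTATE_0;
        dm_get_oriented_plane_size(ps, &w, &h);   /* w == 1920, h == 1080 */
}
#endif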
    9243             : 
    9244           0 : static int dm_check_crtc_cursor(struct drm_atomic_state *state,
    9245             :                                 struct drm_crtc *crtc,
    9246             :                                 struct drm_crtc_state *new_crtc_state)
    9247             : {
    9248           0 :         struct drm_plane *cursor = crtc->cursor, *underlying;
    9249             :         struct drm_plane_state *new_cursor_state, *new_underlying_state;
    9250             :         int i;
    9251             :         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
    9252             :         int cursor_src_w, cursor_src_h;
    9253             :         int underlying_src_w, underlying_src_h;
    9254             : 
    9255             :         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
    9256             :          * cursor per pipe, but it inherits the scaling and positioning from the
    9257             :          * underlying pipe. Check that the cursor plane's scaling matches that of
    9258             :          * the underlying planes. */
    9259             : 
    9260           0 :         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
    9261           0 :         if (!new_cursor_state || !new_cursor_state->fb) {
    9262             :                 return 0;
    9263             :         }
    9264             : 
    9265           0 :         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
    9266           0 :         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
    9267           0 :         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
    9268             : 
    9269           0 :         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
    9270             :                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
    9271           0 :                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
    9272           0 :                         continue;
    9273             : 
    9274             :                 /* Ignore disabled planes */
    9275           0 :                 if (!new_underlying_state->fb)
    9276           0 :                         continue;
    9277             : 
    9278           0 :                 dm_get_oriented_plane_size(new_underlying_state,
    9279             :                                            &underlying_src_w, &underlying_src_h);
    9280           0 :                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
    9281           0 :                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
    9282             : 
    9283           0 :                 if (cursor_scale_w != underlying_scale_w ||
    9284           0 :                     cursor_scale_h != underlying_scale_h) {
    9285           0 :                         drm_dbg_atomic(crtc->dev,
    9286             :                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
    9287             :                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
    9288             :                         return -EINVAL;
    9289             :                 }
    9290             : 
    9291             :                 /* If this plane covers the whole CRTC, no need to check planes underneath */
    9292           0 :                 if (new_underlying_state->crtc_x <= 0 &&
    9293           0 :                     new_underlying_state->crtc_y <= 0 &&
    9294           0 :                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
    9295           0 :                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
    9296             :                         break;
    9297             :         }
    9298             : 
    9299             :         return 0;
    9300             : }
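
/*
 * Illustrative sketch, not part of the driver: the check above compares
 * horizontal and vertical scale factors in units of 1/1000. For example, a
 * 64x64 cursor shown at 64x64 has a scale of 1000, while a 1920-wide plane
 * stretched to 3840 has a scale of 2000, so that combination is rejected
 * with -EINVAL.
 */
#if 0
static bool example_scales_match(int cursor_crtc_w, int cursor_src_w,
                                 int plane_crtc_w, int plane_src_w)
{
        int cursor_scale = cursor_crtc_w * 1000 / cursor_src_w;
        int plane_scale = plane_crtc_w * 1000 / plane_src_w;

        return cursor_scale == plane_scale;
}
#endif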
    9301             : 
    9302             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    9303           0 : static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
    9304             : {
    9305             :         struct drm_connector *connector;
    9306             :         struct drm_connector_state *conn_state, *old_conn_state;
    9307           0 :         struct amdgpu_dm_connector *aconnector = NULL;
    9308             :         int i;
    9309           0 :         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
    9310           0 :                 if (!conn_state->crtc)
    9311           0 :                         conn_state = old_conn_state;
    9312             : 
    9313           0 :                 if (conn_state->crtc != crtc)
    9314           0 :                         continue;
    9315             : 
    9316           0 :                 aconnector = to_amdgpu_dm_connector(connector);
    9317           0 :                 if (!aconnector->port || !aconnector->mst_port)
    9318             :                         aconnector = NULL;
    9319             :                 else
    9320             :                         break;
    9321             :         }
    9322             : 
    9323           0 :         if (!aconnector)
    9324             :                 return 0;
    9325             : 
    9326           0 :         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
    9327             : }
    9328             : #endif
    9329             : 
    9330             : /**
    9331             :  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
    9332             :  *
    9333             :  * @dev: The DRM device
    9334             :  * @state: The atomic state to commit
    9335             :  *
    9336             :  * Validate that the given atomic state is programmable by DC into hardware.
    9337             :  * This involves constructing a &struct dc_state reflecting the new hardware
    9338             :  * state we wish to commit, then querying DC to see if it is programmable. It's
    9339             :  * important not to modify the existing DC state. Otherwise, atomic_check
    9340             :  * may unexpectedly commit hardware changes.
    9341             :  *
    9342             :  * When validating the DC state, it's important that the right locks are
    9343             :  * acquired. For full updates case which removes/adds/updates streams on one
    9344             :  * acquired. For the full-update case, which removes/adds/updates streams on
    9345             :  * one CRTC while flipping on another CRTC, acquiring the global lock
    9346             :  * guarantees that any such full-update commit waits for completion of any
    9347             :  * outstanding flip using DRM's synchronization events.
    9348             :  *
    9349             :  * Note that DM adds the affected connectors for all CRTCs in the state, even
    9350             :  * when that might not seem necessary. This is because DC stream creation
    9351             :  * requires the DC sink, which is tied to the DRM connector state. Cleaning
    9352             :  * this up should be possible but non-trivial - a possible TODO item.
    9353             :  *
    9354             :  * Return: 0 on success, a negative error code if validation failed.
    9355           0 : static int amdgpu_dm_atomic_check(struct drm_device *dev,
    9356             :                                   struct drm_atomic_state *state)
    9357             : {
    9358           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    9359           0 :         struct dm_atomic_state *dm_state = NULL;
    9360           0 :         struct dc *dc = adev->dm.dc;
    9361             :         struct drm_connector *connector;
    9362             :         struct drm_connector_state *old_con_state, *new_con_state;
    9363             :         struct drm_crtc *crtc;
    9364             :         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
    9365             :         struct drm_plane *plane;
    9366             :         struct drm_plane_state *old_plane_state, *new_plane_state;
    9367             :         enum dc_status status;
    9368             :         int ret, i;
    9369           0 :         bool lock_and_validation_needed = false;
    9370             :         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
    9371             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    9372             :         struct dsc_mst_fairness_vars vars[MAX_PIPES];
    9373             :         struct drm_dp_mst_topology_state *mst_state;
    9374             :         struct drm_dp_mst_topology_mgr *mgr;
    9375             : #endif
    9376             : 
    9377           0 :         trace_amdgpu_dm_atomic_check_begin(state);
    9378             : 
    9379           0 :         ret = drm_atomic_helper_check_modeset(dev, state);
    9380           0 :         if (ret) {
    9381           0 :                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
    9382           0 :                 goto fail;
    9383             :         }
    9384             : 
    9385             :         /* Check connector changes */
    9386           0 :         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
    9387           0 :                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
    9388           0 :                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
    9389             : 
    9390             :                 /* Skip connectors that are disabled or part of modeset already. */
    9391           0 :                 if (!new_con_state->crtc)
    9392           0 :                         continue;
    9393             : 
    9394           0 :                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
    9395           0 :                 if (IS_ERR(new_crtc_state)) {
    9396           0 :                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
    9397           0 :                         ret = PTR_ERR(new_crtc_state);
    9398           0 :                         goto fail;
    9399             :                 }
    9400             : 
    9401           0 :                 if (dm_old_con_state->abm_level !=
    9402           0 :                     dm_new_con_state->abm_level)
    9403           0 :                         new_crtc_state->connectors_changed = true;
    9404             :         }
    9405             : 
    9406             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    9407           0 :         if (dc_resource_is_dsc_encoding_supported(dc)) {
    9408           0 :                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
    9409           0 :                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
    9410           0 :                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
    9411           0 :                                 if (ret) {
    9412           0 :                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
    9413           0 :                                         goto fail;
    9414             :                                 }
    9415             :                         }
    9416             :                 }
    9417           0 :                 if (!pre_validate_dsc(state, &dm_state, vars)) {
    9418             :                         ret = -EINVAL;
    9419             :                         goto fail;
    9420             :                 }
    9421             :         }
    9422             : #endif
    9423           0 :         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
    9424           0 :                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    9425             : 
    9426           0 :                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
    9427           0 :                     !new_crtc_state->color_mgmt_changed &&
    9428           0 :                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
    9429           0 :                     !dm_old_crtc_state->dsc_force_changed)
    9430           0 :                         continue;
    9431             : 
    9432           0 :                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
    9433           0 :                 if (ret) {
    9434           0 :                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
    9435           0 :                         goto fail;
    9436             :                 }
    9437             : 
    9438           0 :                 if (!new_crtc_state->enable)
    9439           0 :                         continue;
    9440             : 
    9441           0 :                 ret = drm_atomic_add_affected_connectors(state, crtc);
    9442           0 :                 if (ret) {
    9443           0 :                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
    9444           0 :                         goto fail;
    9445             :                 }
    9446             : 
    9447           0 :                 ret = drm_atomic_add_affected_planes(state, crtc);
    9448           0 :                 if (ret) {
    9449           0 :                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
    9450           0 :                         goto fail;
    9451             :                 }
    9452             : 
    9453           0 :                 if (dm_old_crtc_state->dsc_force_changed)
    9454           0 :                         new_crtc_state->mode_changed = true;
    9455             :         }
    9456             : 
    9457             :         /*
    9458             :          * Add all primary and overlay planes on the CRTC to the state
    9459             :          * whenever a plane is enabled to maintain correct z-ordering
    9460             :          * and to enable fast surface updates.
    9461             :          */
    9462           0 :         drm_for_each_crtc(crtc, dev) {
    9463             :                 bool modified = false;
    9464             : 
    9465           0 :                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
    9466           0 :                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
    9467           0 :                                 continue;
    9468             : 
    9469           0 :                         if (new_plane_state->crtc == crtc ||
    9470           0 :                             old_plane_state->crtc == crtc) {
    9471             :                                 modified = true;
    9472             :                                 break;
    9473             :                         }
    9474             :                 }
    9475             : 
    9476           0 :                 if (!modified)
    9477           0 :                         continue;
    9478             : 
    9479           0 :                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
    9480           0 :                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
    9481           0 :                                 continue;
    9482             : 
    9483           0 :                         new_plane_state =
    9484             :                                 drm_atomic_get_plane_state(state, plane);
    9485             : 
    9486           0 :                         if (IS_ERR(new_plane_state)) {
    9487           0 :                                 ret = PTR_ERR(new_plane_state);
    9488           0 :                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
    9489           0 :                                 goto fail;
    9490             :                         }
    9491             :                 }
    9492             :         }
    9493             : 
    9494             :         /* Remove existing planes if they are modified */
    9495           0 :         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
    9496           0 :                 ret = dm_update_plane_state(dc, state, plane,
    9497             :                                             old_plane_state,
    9498             :                                             new_plane_state,
    9499             :                                             false,
    9500             :                                             &lock_and_validation_needed);
    9501           0 :                 if (ret) {
    9502           0 :                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
    9503           0 :                         goto fail;
    9504             :                 }
    9505             :         }
    9506             : 
    9507             :         /* Disable all crtcs which require disable */
    9508           0 :         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
    9509           0 :                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
    9510             :                                            old_crtc_state,
    9511             :                                            new_crtc_state,
    9512             :                                            false,
    9513             :                                            &lock_and_validation_needed);
    9514           0 :                 if (ret) {
    9515           0 :                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
    9516           0 :                         goto fail;
    9517             :                 }
    9518             :         }
    9519             : 
    9520             :         /* Enable all crtcs which require enable */
    9521           0 :         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
    9522           0 :                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
    9523             :                                            old_crtc_state,
    9524             :                                            new_crtc_state,
    9525             :                                            true,
    9526             :                                            &lock_and_validation_needed);
    9527           0 :                 if (ret) {
    9528           0 :                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
    9529           0 :                         goto fail;
    9530             :                 }
    9531             :         }
    9532             : 
    9533             :         /* Add new/modified planes */
    9534           0 :         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
    9535           0 :                 ret = dm_update_plane_state(dc, state, plane,
    9536             :                                             old_plane_state,
    9537             :                                             new_plane_state,
    9538             :                                             true,
    9539             :                                             &lock_and_validation_needed);
    9540           0 :                 if (ret) {
    9541           0 :                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
    9542           0 :                         goto fail;
    9543             :                 }
    9544             :         }
    9545             : 
    9546             :         /* Run this here since we want to validate the streams we created */
    9547           0 :         ret = drm_atomic_helper_check_planes(dev, state);
    9548           0 :         if (ret) {
    9549           0 :                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
    9550           0 :                 goto fail;
    9551             :         }
    9552             : 
    9553           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
    9554           0 :                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    9555           0 :                 if (dm_new_crtc_state->mpo_requested)
    9556           0 :                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
    9557             :         }
    9558             : 
    9559             :         /* Check cursor planes scaling */
    9560           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
    9561           0 :                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
    9562           0 :                 if (ret) {
    9563           0 :                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
    9564           0 :                         goto fail;
    9565             :                 }
    9566             :         }
    9567             : 
    9568           0 :         if (state->legacy_cursor_update) {
    9569             :                 /*
    9570             :                  * This is a fast cursor update coming from the plane update
    9571             :                  * helper, check if it can be done asynchronously for better
    9572             :                  * performance.
    9573             :                  */
    9574           0 :                 state->async_update =
    9575           0 :                         !drm_atomic_helper_async_check(dev, state);
    9576             : 
    9577             :                 /*
    9578             :                  * Skip the remaining global validation if this is an async
    9579             :                  * update. Cursor updates can be done without affecting
    9580             :                  * state or bandwidth calcs and this avoids the performance
    9581             :                  * penalty of locking the private state object and
    9582             :                  * allocating a new dc_state.
    9583             :                  */
    9584           0 :                 if (state->async_update)
    9585             :                         return 0;
    9586             :         }
    9587             : 
    9588             :         /* Check scaling and underscan changes */
    9589             :         /* TODO Removed scaling changes validation due to inability to commit
    9590             :          * new stream into context w/o causing full reset. Need to
    9591             :          * decide how to handle.
    9592             :          */
    9593           0 :         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
    9594           0 :                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
    9595           0 :                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
    9596           0 :                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
    9597             : 
    9598             :                 /* Skip any modesets/resets */
    9599           0 :                 if (!acrtc || drm_atomic_crtc_needs_modeset(
    9600           0 :                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
    9601           0 :                         continue;
    9602             : 
    9603             :                 /* Skip anything that is not a scaling or underscan change */
    9604           0 :                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
    9605           0 :                         continue;
    9606             : 
    9607           0 :                 lock_and_validation_needed = true;
    9608             :         }
    9609             : 
    9610             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    9611             :         /* set the slot info for each mst_state based on the link encoding format */
    9612           0 :         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
    9613             :                 struct amdgpu_dm_connector *aconnector;
    9614             :                 struct drm_connector *connector;
    9615             :                 struct drm_connector_list_iter iter;
    9616             :                 u8 link_coding_cap;
    9617             : 
    9618           0 :                 if (!mgr->mst_state)
    9619           0 :                         continue;
    9620             : 
    9621           0 :                 drm_connector_list_iter_begin(dev, &iter);
    9622           0 :                 drm_for_each_connector_iter(connector, &iter) {
    9623           0 :                         int id = connector->index;
    9624             : 
    9625           0 :                         if (id == mst_state->mgr->conn_base_id) {
    9626           0 :                                 aconnector = to_amdgpu_dm_connector(connector);
    9627           0 :                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
    9628           0 :                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
    9629             : 
    9630           0 :                                 break;
    9631             :                         }
    9632             :                 }
    9633           0 :                 drm_connector_list_iter_end(&iter);
    9634             : 
    9635             :         }
    9636             : #endif
    9637             :         /*
    9638             :          * Streams and planes are reset when there are changes that affect
    9639             :          * bandwidth. Anything that affects bandwidth needs to go through
    9640             :          * DC global validation to ensure that the configuration can be applied
    9641             :          * to hardware.
    9642             :          *
    9643             :          * We currently have to stall out here in atomic_check for
    9644             :          * outstanding commits to finish, because our IRQ handlers
    9645             :          * reference DRM state directly - otherwise we can end up
    9646             :          * disabling interrupts too early.
    9647             :          *
    9648             :          * TODO: Remove this stall and drop DM state private objects.
    9649             :          */
    9650           0 :         if (lock_and_validation_needed) {
    9651           0 :                 ret = dm_atomic_get_state(state, &dm_state);
    9652           0 :                 if (ret) {
    9653           0 :                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
    9654           0 :                         goto fail;
    9655             :                 }
    9656             : 
    9657           0 :                 ret = do_aquire_global_lock(dev, state);
    9658           0 :                 if (ret) {
    9659           0 :                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
    9660           0 :                         goto fail;
    9661             :                 }
    9662             : 
    9663             : #if defined(CONFIG_DRM_AMD_DC_DCN)
    9664           0 :                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
    9665           0 :                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
    9666           0 :                         ret = -EINVAL;
    9667           0 :                         goto fail;
    9668             :                 }
    9669             : 
    9670           0 :                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
    9671           0 :                 if (ret) {
    9672           0 :                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
    9673           0 :                         goto fail;
    9674             :                 }
    9675             : #endif
    9676             : 
    9677             :                 /*
    9678             :                  * Perform validation of MST topology in the state:
    9679             :                  * We need to perform MST atomic check before calling
    9680             :                  * dc_validate_global_state(), or there is a chance
    9681             :                  * to get stuck in an infinite loop and hang eventually.
    9682             :                  */
    9683           0 :                 ret = drm_dp_mst_atomic_check(state);
    9684           0 :                 if (ret) {
    9685           0 :                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
    9686           0 :                         goto fail;
    9687             :                 }
    9688           0 :                 status = dc_validate_global_state(dc, dm_state->context, true);
    9689           0 :                 if (status != DC_OK) {
    9690           0 :                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
    9691             :                                        dc_status_to_str(status), status);
    9692           0 :                         ret = -EINVAL;
    9693           0 :                         goto fail;
    9694             :                 }
    9695             :         } else {
    9696             :                 /*
    9697             :                  * The commit is a fast update. Fast updates shouldn't change
    9698             :                  * the DC context or affect global validation, and they can
    9699             :                  * have their commit work done in parallel with other commits
    9700             :                  * not touching the same resource. If we have a new DC context as part of
    9701             :                  * the DM atomic state from validation we need to free it and
    9702             :                  * retain the existing one instead.
    9703             :                  *
    9704             :                  * Furthermore, since the DM atomic state only contains the DC
    9705             :                  * context and can safely be annulled, we can free the state
    9706             :                  * and clear the associated private object now to free
    9707             :                  * some memory and avoid a possible use-after-free later.
    9708             :                  */
    9709             : 
    9710           0 :                 for (i = 0; i < state->num_private_objs; i++) {
    9711           0 :                         struct drm_private_obj *obj = state->private_objs[i].ptr;
    9712             : 
    9713           0 :                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
    9714           0 :                                 int j = state->num_private_objs-1;
    9715             : 
    9716           0 :                                 dm_atomic_destroy_state(obj,
    9717             :                                                 state->private_objs[i].state);
    9718             : 
    9719             :                                 /* If i is not at the end of the array then the
    9720             :                                  * last element needs to be moved to where i was
    9721             :                                  * before the array can safely be truncated.
    9722             :                                  */
    9723           0 :                                 if (i != j)
    9724           0 :                                         state->private_objs[i] =
    9725           0 :                                                 state->private_objs[j];
    9726             : 
    9727           0 :                                 state->private_objs[j].ptr = NULL;
    9728           0 :                                 state->private_objs[j].state = NULL;
    9729           0 :                                 state->private_objs[j].old_state = NULL;
    9730           0 :                                 state->private_objs[j].new_state = NULL;
    9731             : 
    9732           0 :                                 state->num_private_objs = j;
    9733           0 :                                 break;
    9734             :                         }
    9735             :                 }
    9736             :         }
    9737             : 
    9738             :         /* Store the overall update type for use later in atomic check. */
    9739           0 :         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
    9740           0 :                 struct dm_crtc_state *dm_new_crtc_state =
    9741           0 :                         to_dm_crtc_state(new_crtc_state);
    9742             : 
    9743           0 :                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
    9744           0 :                                                          UPDATE_TYPE_FULL :
    9745             :                                                          UPDATE_TYPE_FAST;
    9746             :         }
    9747             : 
    9748             :         /* Must be success */
    9749           0 :         WARN_ON(ret);
    9750             : 
    9751             :         trace_amdgpu_dm_atomic_check_finish(state, ret);
    9752             : 
    9753             :         return ret;
    9754             : 
    9755             : fail:
    9756           0 :         if (ret == -EDEADLK)
    9757           0 :                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
    9758           0 :         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
    9759           0 :                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
    9760             :         else
    9761           0 :                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
    9762             : 
    9763             :         trace_amdgpu_dm_atomic_check_finish(state, ret);
    9764             : 
    9765             :         return ret;
    9766             : }
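
/*
 * Illustrative sketch, not part of the driver: an atomic check like the one
 * above is exposed to the DRM core through drm_mode_config_funcs. The table
 * below only shows that wiring pattern; the driver's actual table (with its
 * own fb_create/commit callbacks) is defined elsewhere and may differ.
 */
#if 0
static const struct drm_mode_config_funcs example_mode_funcs = {
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
};
#endif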
    9767             : 
    9768           0 : static bool is_dp_capable_without_timing_msa(struct dc *dc,
    9769             :                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
    9770             : {
    9771             :         uint8_t dpcd_data;
    9772           0 :         bool capable = false;
    9773             : 
    9774           0 :         if (amdgpu_dm_connector->dc_link &&
    9775           0 :                 dm_helpers_dp_read_dpcd(
    9776             :                                 NULL,
    9777             :                                 amdgpu_dm_connector->dc_link,
    9778             :                                 DP_DOWN_STREAM_PORT_COUNT,
    9779             :                                 &dpcd_data,
    9780             :                                 sizeof(dpcd_data))) {
    9781           0 :                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
    9782             :         }
    9783             : 
    9784           0 :         return capable;
    9785             : }
    9786             : 
    9787           0 : static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
    9788             :                 unsigned int offset,
    9789             :                 unsigned int total_length,
    9790             :                 uint8_t *data,
    9791             :                 unsigned int length,
    9792             :                 struct amdgpu_hdmi_vsdb_info *vsdb)
    9793             : {
    9794             :         bool res;
    9795             :         union dmub_rb_cmd cmd;
    9796             :         struct dmub_cmd_send_edid_cea *input;
    9797             :         struct dmub_cmd_edid_cea_output *output;
    9798             : 
    9799           0 :         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
    9800             :                 return false;
    9801             : 
    9802           0 :         memset(&cmd, 0, sizeof(cmd));
    9803             : 
    9804           0 :         input = &cmd.edid_cea.data.input;
    9805             : 
    9806           0 :         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
    9807           0 :         cmd.edid_cea.header.sub_type = 0;
    9808           0 :         cmd.edid_cea.header.payload_bytes =
    9809             :                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
    9810           0 :         input->offset = offset;
    9811           0 :         input->length = length;
    9812           0 :         input->cea_total_length = total_length;
    9813           0 :         memcpy(input->payload, data, length);
    9814             : 
    9815           0 :         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
    9816           0 :         if (!res) {
    9817           0 :                 DRM_ERROR("EDID CEA parser failed\n");
    9818             :                 return false;
    9819             :         }
    9820             : 
    9821           0 :         output = &cmd.edid_cea.data.output;
    9822             : 
    9823           0 :         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
    9824           0 :                 if (!output->ack.success) {
    9825           0 :                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
    9826             :                                         output->ack.offset);
    9827             :                 }
    9828           0 :         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
    9829           0 :                 if (!output->amd_vsdb.vsdb_found)
    9830             :                         return false;
    9831             : 
    9832           0 :                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
    9833           0 :                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
    9834           0 :                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
    9835           0 :                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
    9836             :         } else {
    9837           0 :                 DRM_WARN("Unknown EDID CEA parser results\n");
    9838             :                 return false;
    9839             :         }
    9840             : 
    9841             :         return true;
    9842             : }
    9843             : 
    9844           0 : static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
    9845             :                 uint8_t *edid_ext, int len,
    9846             :                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
    9847             : {
    9848             :         int i;
    9849             : 
    9850             :         /* send extension block to DMCU for parsing */
    9851           0 :         for (i = 0; i < len; i += 8) {
    9852             :                 bool res;
    9853             :                 int offset;
    9854             : 
    9855             :                 /* send 8 bytes at a time */
    9856           0 :                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
    9857           0 :                         return false;
    9858             : 
    9859           0 :                 if (i + 8 == len) {
    9860             :                         /* EDID block transfer completed, expect result */
    9861             :                         int version, min_rate, max_rate;
    9862             : 
    9863           0 :                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
    9864           0 :                         if (res) {
    9865             :                                 /* amd vsdb found */
    9866           0 :                                 vsdb_info->freesync_supported = 1;
    9867           0 :                                 vsdb_info->amd_vsdb_version = version;
    9868           0 :                                 vsdb_info->min_refresh_rate_hz = min_rate;
    9869           0 :                                 vsdb_info->max_refresh_rate_hz = max_rate;
    9870             :                                 return true;
    9871             :                         }
    9872             :                         /* not amd vsdb */
    9873             :                         return false;
    9874             :                 }
    9875             : 
    9876             :                 /* check for ack */
    9877           0 :                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
    9878           0 :                 if (!res)
    9879             :                         return false;
    9880             :         }
    9881             : 
    9882             :         return false;
    9883             : }
    9884             : 
    9885           0 : static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
    9886             :                 uint8_t *edid_ext, int len,
    9887             :                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
    9888             : {
    9889             :         int i;
    9890             : 
    9891             :         /* send extension block to DMUB for parsing */
    9892           0 :         for (i = 0; i < len; i += 8) {
    9893             :                 /* send 8 bytes at a time */
    9894           0 :                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
    9895             :                         return false;
    9896             :         }
    9897             : 
    9898           0 :         return vsdb_info->freesync_supported;
    9899             : }
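
/*
 * Illustrative sketch, not part of the driver: both parsers above stream a
 * 128-byte CEA extension block to firmware in 8-byte chunks, so a full block
 * takes EDID_LENGTH / 8 = 16 transfers, and only the final chunk (offset 120)
 * is expected to yield the parsed AMD VSDB result.
 */
#if 0
static int example_cea_chunk_count(int len)
{
        /* len is EDID_LENGTH (128) in practice; 8 bytes per transfer */
        return DIV_ROUND_UP(len, 8);    /* 16 chunks for a full block */
}
#endif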
    9900             : 
    9901           0 : static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
    9902             :                 uint8_t *edid_ext, int len,
    9903             :                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
    9904             : {
    9905           0 :         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
    9906             : 
    9907           0 :         if (adev->dm.dmub_srv)
    9908           0 :                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
    9909             :         else
    9910           0 :                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
    9911             : }
    9912             : 
    9913           0 : static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
    9914             :                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
    9915             : {
    9916           0 :         uint8_t *edid_ext = NULL;
    9917             :         int i;
    9918           0 :         bool valid_vsdb_found = false;
    9919             : 
    9920             :         /*----- drm_find_cea_extension() -----*/
    9921             :         /* No EDID or EDID extensions */
    9922           0 :         if (edid == NULL || edid->extensions == 0)
    9923             :                 return -ENODEV;
    9924             : 
    9925             :         /* Find CEA extension */
    9926           0 :         for (i = 0; i < edid->extensions; i++) {
    9927           0 :                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
    9928           0 :                 if (edid_ext[0] == CEA_EXT)
    9929             :                         break;
    9930             :         }
    9931             : 
    9932           0 :         if (i == edid->extensions)
    9933             :                 return -ENODEV;
    9934             : 
    9935             :         /*----- cea_db_offsets() -----*/
    9936           0 :         if (edid_ext[0] != CEA_EXT)
    9937             :                 return -ENODEV;
    9938             : 
    9939           0 :         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
    9940             : 
    9941           0 :         return valid_vsdb_found ? i : -ENODEV;
    9942             : }
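
/*
 * Illustrative sketch, not part of the driver: EDID extension blocks are
 * 128-byte (EDID_LENGTH) blocks appended after the base block, so extension
 * i starts at byte (i + 1) * EDID_LENGTH, and a CEA-861 extension is
 * identified by the CEA_EXT (0x02) tag in its first byte - the same walk the
 * function above performs before handing the block to the CEA parser.
 */
#if 0
static u8 *example_find_cea_ext(struct edid *edid)
{
        int i;

        for (i = 0; i < edid->extensions; i++) {
                u8 *ext = (u8 *)edid + EDID_LENGTH * (i + 1);

                if (ext[0] == CEA_EXT)
                        return ext;
        }

        return NULL;
}
#endif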
    9943             : 
    9944             : /**
    9945             :  * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
    9946             :  *
    9947             :  * @connector: Connector to query.
    9948             :  * @edid: EDID from monitor
    9949             :  *
    9950             :  * Amdgpu supports FreeSync on DP and HDMI displays, and it needs to keep
    9951             :  * track of some of the display information in the internal data struct used
    9952             :  * by amdgpu_dm. This function checks the connector type to determine how to
    9953             :  * set the FreeSync parameters.
    9954             :  */
    9955           0 : void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
    9956             :                                     struct edid *edid)
    9957             : {
    9958           0 :         int i = 0;
    9959             :         struct detailed_timing *timing;
    9960             :         struct detailed_non_pixel *data;
    9961             :         struct detailed_data_monitor_range *range;
    9962           0 :         struct amdgpu_dm_connector *amdgpu_dm_connector =
    9963           0 :                         to_amdgpu_dm_connector(connector);
    9964           0 :         struct dm_connector_state *dm_con_state = NULL;
    9965             :         struct dc_sink *sink;
    9966             : 
    9967           0 :         struct drm_device *dev = connector->dev;
    9968           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
    9969           0 :         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
    9970           0 :         bool freesync_capable = false;
    9971             : 
    9972           0 :         if (!connector->state) {
    9973           0 :                 DRM_ERROR("%s - Connector has no state", __func__);
    9974           0 :                 goto update;
    9975             :         }
    9976             : 
    9977           0 :         sink = amdgpu_dm_connector->dc_sink ?
    9978           0 :                 amdgpu_dm_connector->dc_sink :
    9979             :                 amdgpu_dm_connector->dc_em_sink;
    9980             : 
    9981           0 :         if (!edid || !sink) {
    9982           0 :                 dm_con_state = to_dm_connector_state(connector->state);
    9983             : 
    9984           0 :                 amdgpu_dm_connector->min_vfreq = 0;
    9985           0 :                 amdgpu_dm_connector->max_vfreq = 0;
    9986           0 :                 amdgpu_dm_connector->pixel_clock_mhz = 0;
    9987           0 :                 connector->display_info.monitor_range.min_vfreq = 0;
    9988           0 :                 connector->display_info.monitor_range.max_vfreq = 0;
    9989           0 :                 freesync_capable = false;
    9990             : 
    9991           0 :                 goto update;
    9992             :         }
    9993             : 
    9994           0 :         dm_con_state = to_dm_connector_state(connector->state);
    9995             : 
    9996           0 :         if (!adev->dm.freesync_module)
    9997             :                 goto update;
    9998             : 
    9999           0 :         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
   10000           0 :                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
   10001           0 :                 bool edid_check_required = false;
   10002             : 
   10003           0 :                 if (edid) {
   10004           0 :                         edid_check_required = is_dp_capable_without_timing_msa(
   10005             :                                                 adev->dm.dc,
   10006             :                                                 amdgpu_dm_connector);
   10007             :                 }
   10008             : 
   10009           0 :                 if (edid_check_required == true && (edid->version > 1 ||
   10010           0 :                    (edid->version == 1 && edid->revision > 1))) {
   10011           0 :                         for (i = 0; i < 4; i++) {
   10012             : 
   10013           0 :                                 timing  = &edid->detailed_timings[i];
   10014           0 :                                 data    = &timing->data.other_data;
   10015           0 :                                 range   = &data->data.range;
   10016             :                                 /*
   10017             :                                  * Check if monitor has continuous frequency mode
   10018             :                                  */
   10019           0 :                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
   10020           0 :                                         continue;
   10021             :                                 /*
   10022             :                                  * Check for flag range limits only. If flag == 1 then
   10023             :                                  * no additional timing information is provided.
   10024             :                                  * Default GTF, GTF secondary curve and CVT are not
   10025             :                                  * supported.
   10026             :                                  */
   10027           0 :                                 if (range->flags != 1)
   10028           0 :                                         continue;
   10029             : 
   10030           0 :                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
   10031           0 :                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
   10032           0 :                                 amdgpu_dm_connector->pixel_clock_mhz =
   10033           0 :                                         range->pixel_clock_mhz * 10;
   10034             : 
   10035           0 :                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
   10036           0 :                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
   10037             : 
   10038           0 :                                 break;
   10039             :                         }
   10040             : 
   10041           0 :                         if (amdgpu_dm_connector->max_vfreq -
   10042           0 :                             amdgpu_dm_connector->min_vfreq > 10) {
   10043             : 
   10044           0 :                                 freesync_capable = true;
   10045             :                         }
   10046             :                 }
   10047           0 :         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
   10048           0 :                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
   10049           0 :                 if (i >= 0 && vsdb_info.freesync_supported) {
   10050           0 :                         timing  = &edid->detailed_timings[i];
   10051           0 :                         data    = &timing->data.other_data;
   10052             : 
   10053           0 :                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
   10054           0 :                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
   10055           0 :                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
   10056           0 :                                 freesync_capable = true;
   10057             : 
   10058           0 :                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
   10059           0 :                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
   10060             :                 }
   10061             :         }
   10062             : 
   10063             : update:
   10064           0 :         if (dm_con_state)
   10065           0 :                 dm_con_state->freesync_capable = freesync_capable;
   10066             : 
   10067           0 :         if (connector->vrr_capable_property)
   10068           0 :                 drm_connector_set_vrr_capable_property(connector,
   10069             :                                                        freesync_capable);
   10070           0 : }
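
/*
 * Illustrative sketch, not part of the driver: the function above only
 * reports a display as FreeSync capable when the advertised refresh range is
 * wider than 10 Hz, e.g. a 48-144 Hz range qualifies while a fixed 60-60 Hz
 * range does not.
 */
#if 0
static bool example_range_is_freesync_capable(int min_vfreq, int max_vfreq)
{
        return max_vfreq - min_vfreq > 10;
}
#endif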
   10071             : 
   10072           0 : void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
   10073             : {
   10074           0 :         struct amdgpu_device *adev = drm_to_adev(dev);
   10075           0 :         struct dc *dc = adev->dm.dc;
   10076             :         int i;
   10077             : 
   10078           0 :         mutex_lock(&adev->dm.dc_lock);
   10079           0 :         if (dc->current_state) {
   10080           0 :                 for (i = 0; i < dc->current_state->stream_count; ++i)
   10081           0 :                         dc->current_state->streams[i]
   10082           0 :                                 ->triggered_crtc_reset.enabled =
   10083           0 :                                 adev->dm.force_timing_sync;
   10084             : 
   10085           0 :                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
   10086           0 :                 dc_trigger_sync(dc, dc->current_state);
   10087             :         }
   10088           0 :         mutex_unlock(&adev->dm.dc_lock);
   10089           0 : }
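                      :
                      : /*
                      :  * Editorial sketch: amdgpu_dm_trigger_timing_sync() only propagates whatever
                      :  * is currently stored in adev->dm.force_timing_sync to the active streams, so
                      :  * a caller is expected to update that flag first.  dm_set_force_timing_sync()
                      :  * is a hypothetical wrapper shown purely for illustration.
                      :  */
                      : static void dm_set_force_timing_sync(struct amdgpu_device *adev, bool enable)
                      : {
                      :         adev->dm.force_timing_sync = enable;              /* record the request      */
                      :         amdgpu_dm_trigger_timing_sync(adev_to_drm(adev)); /* apply to active streams */
                      : }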
   10090             : 
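                      : /*
                      :  * Register accessors used by the display core: MMIO writes/reads issued
                      :  * through the dm_services helpers land here, where they are traced
                      :  * (trace_amdgpu_dc_wreg/rreg) and, with DM_CHECK_ADDR_0, sanity-checked
                      :  * against accidental accesses to register offset 0.
                      :  */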
   10091           0 : void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
   10092             :                        uint32_t value, const char *func_name)
   10093             : {
   10094             : #ifdef DM_CHECK_ADDR_0
   10095             :         if (address == 0) {
    10096             :                 DC_ERR("invalid register write; address = 0\n");
   10097             :                 return;
   10098             :         }
   10099             : #endif
   10100           0 :         cgs_write_register(ctx->cgs_device, address, value);
   10101           0 :         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
   10102           0 : }
   10103             : 
   10104           0 : uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
   10105             :                           const char *func_name)
   10106             : {
   10107             :         uint32_t value;
   10108             : #ifdef DM_CHECK_ADDR_0
   10109             :         if (address == 0) {
   10110             :                 DC_ERR("invalid register read; address = 0\n");
   10111             :                 return 0;
   10112             :         }
   10113             : #endif
   10114             : 
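                      :         /*
                      :          * Reads are not supported while the DMUB register helper is gathering
                      :          * accesses for offload (unless it is burst-writing them immediately):
                      :          * flag the unexpected read and return 0 instead of touching MMIO.
                      :          */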
   10115           0 :         if (ctx->dmub_srv &&
   10116           0 :             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
   10117           0 :             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
   10118           0 :                 ASSERT(false);
   10119             :                 return 0;
   10120             :         }
   10121             : 
   10122           0 :         value = cgs_read_register(ctx->cgs_device, address);
   10123             : 
   10124           0 :         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
   10125             : 
   10126           0 :         return value;
   10127             : }
   10128             : 
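                      : /*
                      :  * Translate the outcome of a DMUB async-turned-synchronous transaction into
                      :  * the pair the callers expect: the return value (AUX reply length on success
                      :  * for AUX commands, 0 for SET_CONFIG, -1 otherwise) and the *operation_result
                      :  * code describing why the access succeeded or failed.
                      :  */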
   10129             : static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
   10130             :                                                 struct dc_context *ctx,
   10131             :                                                 uint8_t status_type,
   10132             :                                                 uint32_t *operation_result)
   10133             : {
   10134           0 :         struct amdgpu_device *adev = ctx->driver_context;
   10135           0 :         int return_status = -1;
   10136           0 :         struct dmub_notification *p_notify = adev->dm.dmub_notify;
   10137             : 
   10138           0 :         if (is_cmd_aux) {
   10139             :                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
   10140           0 :                         return_status = p_notify->aux_reply.length;
   10141           0 :                         *operation_result = p_notify->result;
   10142             :                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
   10143           0 :                         *operation_result = AUX_RET_ERROR_TIMEOUT;
   10144             :                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
   10145             :                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
   10146             :                 } else {
   10147             :                         *operation_result = AUX_RET_ERROR_UNKNOWN;
   10148             :                 }
   10149             :         } else {
   10150             :                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
   10151           0 :                         return_status = 0;
   10152           0 :                         *operation_result = p_notify->sc_status;
   10153             :                 } else {
   10154           0 :                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
   10155             :                 }
   10156             :         }
   10157             : 
   10158             :         return return_status;
   10159             : }
   10160             : 
   10161           0 : int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
   10162             :         unsigned int link_index, void *cmd_payload, void *operation_result)
   10163             : {
   10164           0 :         struct amdgpu_device *adev = ctx->driver_context;
   10165           0 :         int ret = 0;
   10166             : 
   10167           0 :         if (is_cmd_aux) {
   10168           0 :                 dc_process_dmub_aux_transfer_async(ctx->dc,
   10169             :                         link_index, (struct aux_payload *)cmd_payload);
   10170           0 :         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
   10171             :                                         (struct set_config_cmd_payload *)cmd_payload,
   10172             :                                         adev->dm.dmub_notify)) {
   10173           0 :                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
   10174             :                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
   10175             :                                         (uint32_t *)operation_result);
   10176             :         }
   10177             : 
   10178           0 :         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
   10179           0 :         if (ret == 0) {
    10180           0 :                 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
   10181             :                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
   10182             :                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
   10183             :                                 (uint32_t *)operation_result);
   10184             :         }
   10185             : 
   10186           0 :         if (is_cmd_aux) {
   10187           0 :                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
   10188           0 :                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
   10189             : 
   10190           0 :                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
   10191           0 :                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
   10192           0 :                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
   10193           0 :                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
   10194             :                                        adev->dm.dmub_notify->aux_reply.length);
   10195             :                         }
   10196             :                 }
   10197             :         }
   10198             : 
   10199             :         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
   10200             :                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
   10201             :                         (uint32_t *)operation_result);
   10202             : }
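                      :
                      : /*
                      :  * Editorial sketch: how a native DPCD read could be routed through
                      :  * amdgpu_dm_process_dmub_aux_transfer_sync().  The aux_payload field names
                      :  * follow dc_types.h but may differ between DC versions; example_dpcd_read()
                      :  * is purely illustrative, the real callers sit elsewhere in the driver's
                      :  * DP AUX transfer path.
                      :  */
                      : static int example_dpcd_read(struct dc_context *ctx, unsigned int link_index,
                      :                              uint32_t dpcd_addr, uint8_t *buf, uint32_t size)
                      : {
                      :         uint8_t reply = 0;
                      :         uint32_t operation_result = 0;
                      :         struct aux_payload payload = {
                      :                 .address      = dpcd_addr, /* DPCD register offset         */
                      :                 .length       = size,      /* number of bytes to read      */
                      :                 .data         = buf,       /* destination buffer           */
                      :                 .reply        = &reply,    /* AUX reply code written back  */
                      :                 .write        = false,     /* native AUX read              */
                      :                 .i2c_over_aux = false,     /* plain DPCD, not DDC-over-AUX */
                      :         };
                      :
                      :         /* Reply length on success, -1 on timeout/failure (see helper above). */
                      :         return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, link_index,
                      :                                                          &payload,
                      :                                                          &operation_result);
                      : }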
   10203             : 
    10204             : /*
    10205             :  * Check whether seamless boot is supported.
    10206             :  *
    10207             :  * So far we only support seamless boot on CHIP_VANGOGH, and only when
    10208             :  * stolen VGA memory is not kept reserved.  If everything goes well, we
    10209             :  * may consider expanding seamless boot to other ASICs.
    10210             :  */
   10211           0 : bool check_seamless_boot_capability(struct amdgpu_device *adev)
   10212             : {
   10213           0 :         switch (adev->asic_type) {
   10214             :         case CHIP_VANGOGH:
   10215           0 :                 if (!adev->mman.keep_stolen_vga_memory)
   10216             :                         return true;
   10217             :                 break;
   10218             :         default:
   10219             :                 break;
   10220             :         }
   10221             : 
   10222           0 :         return false;
   10223             : }
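                      :
                      : /*
                      :  * Editorial sketch: a typical consumer simply branches on the capability
                      :  * check during early init; in the driver the result gates DC init-time
                      :  * behaviour.  example_want_seamless_boot() is a hypothetical helper shown
                      :  * only to illustrate the call.
                      :  */
                      : static bool example_want_seamless_boot(struct amdgpu_device *adev)
                      : {
                      :         if (!check_seamless_boot_capability(adev))
                      :                 return false;
                      :
                      :         DRM_INFO("Seamless boot condition check passed\n");
                      :         return true;
                      : }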

Generated by: LCOV version 1.14