1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42 #if defined(CONFIG_DEBUG_FS)
43 #include "amdgpu_dm_debugfs.h"
44 #endif
45
46 #include "ivsrcid/ivsrcid_vislands30.h"
47
48 #include <linux/module.h>
49 #include <linux/moduleparam.h>
50 #if 0
51 #include <linux/version.h>
52 #endif
53 #include <linux/types.h>
54 #include <linux/pm_runtime.h>
55
56 #include <drm/drmP.h>
57 #include <drm/drm_atomic.h>
58 #include <drm/drm_atomic_helper.h>
59 #include <drm/drm_dp_mst_helper.h>
60 #include <drm/drm_fb_helper.h>
61 #include <drm/drm_edid.h>
62
63 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
64 #include "ivsrcid/irqsrcs_dcn_1_0.h"
65
66 #include "dcn/dcn_1_0_offset.h"
67 #include "dcn/dcn_1_0_sh_mask.h"
68 #include "soc15_hw_ip.h"
69 #include "vega10_ip_offset.h"
70
71 #include "soc15_common.h"
72 #endif
73
74 #include "modules/inc/mod_freesync.h"
75
76 #include "i2caux_interface.h"
77
78 /* basic init/fini API */
79 static int amdgpu_dm_init(struct amdgpu_device *adev);
80 static void amdgpu_dm_fini(struct amdgpu_device *adev);
81
82 /* initializes drm_device display related structures, based on the information
83 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
84 * drm_encoder, drm_mode_config
85 *
86 * Returns 0 on success
87 */
88 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
89 /* removes and deallocates the drm structures, created by the above function */
90 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
91
92 static void
93 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
94
95 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
96 struct amdgpu_plane *aplane,
97 unsigned long possible_crtcs);
98 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
99 struct drm_plane *plane,
100 uint32_t link_index);
101 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
102 struct amdgpu_dm_connector *amdgpu_dm_connector,
103 uint32_t link_index,
104 struct amdgpu_encoder *amdgpu_encoder);
105 static int amdgpu_dm_encoder_init(struct drm_device *dev,
106 struct amdgpu_encoder *aencoder,
107 uint32_t link_index);
108
109 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
110
111 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
112 struct drm_atomic_state *state,
113 bool nonblock);
114
115 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
116
117 static int amdgpu_dm_atomic_check(struct drm_device *dev,
118 struct drm_atomic_state *state);
119
120
121
122
123 static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
124 DRM_PLANE_TYPE_PRIMARY,
125 DRM_PLANE_TYPE_PRIMARY,
126 DRM_PLANE_TYPE_PRIMARY,
127 DRM_PLANE_TYPE_PRIMARY,
128 DRM_PLANE_TYPE_PRIMARY,
129 DRM_PLANE_TYPE_PRIMARY,
130 };
131
132 static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
133 DRM_PLANE_TYPE_PRIMARY,
134 DRM_PLANE_TYPE_PRIMARY,
135 DRM_PLANE_TYPE_PRIMARY,
136 DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
137 };
138
139 static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
140 DRM_PLANE_TYPE_PRIMARY,
141 DRM_PLANE_TYPE_PRIMARY,
142 DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
143 };
144
145 /*
146 * dm_vblank_get_counter
147 *
148 * @brief
149 * Get counter for number of vertical blanks
150 *
151 * @param
152 * struct amdgpu_device *adev - [in] desired amdgpu device
153 * int disp_idx - [in] which CRTC to get the counter from
154 *
155 * @return
156 * Counter for vertical blanks
157 */
dm_vblank_get_counter(struct amdgpu_device * adev,int crtc)158 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
159 {
160 if (crtc >= adev->mode_info.num_crtc)
161 return 0;
162 else {
163 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
164 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
165 acrtc->base.state);
166
167
168 if (acrtc_state->stream == NULL) {
169 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
170 crtc);
171 return 0;
172 }
173
174 return dc_stream_get_vblank_counter(acrtc_state->stream);
175 }
176 }
177
dm_crtc_get_scanoutpos(struct amdgpu_device * adev,int crtc,u32 * vbl,u32 * position)178 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
179 u32 *vbl, u32 *position)
180 {
181 uint32_t v_blank_start, v_blank_end, h_position, v_position;
182
183 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
184 return -EINVAL;
185 else {
186 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
187 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
188 acrtc->base.state);
189
190 if (acrtc_state->stream == NULL) {
191 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
192 crtc);
193 return 0;
194 }
195
196 /*
197 * TODO rework base driver to use values directly.
198 * for now parse it back into reg-format
199 */
200 dc_stream_get_scanoutpos(acrtc_state->stream,
201 &v_blank_start,
202 &v_blank_end,
203 &h_position,
204 &v_position);
205
206 *position = v_position | (h_position << 16);
207 *vbl = v_blank_start | (v_blank_end << 16);
208 }
209
210 return 0;
211 }
212
dm_is_idle(void * handle)213 static bool dm_is_idle(void *handle)
214 {
215 /* XXX todo */
216 return true;
217 }
218
dm_wait_for_idle(void * handle)219 static int dm_wait_for_idle(void *handle)
220 {
221 /* XXX todo */
222 return 0;
223 }
224
dm_check_soft_reset(void * handle)225 static bool dm_check_soft_reset(void *handle)
226 {
227 return false;
228 }
229
dm_soft_reset(void * handle)230 static int dm_soft_reset(void *handle)
231 {
232 /* XXX todo */
233 return 0;
234 }
235
236 static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device * adev,int otg_inst)237 get_crtc_by_otg_inst(struct amdgpu_device *adev,
238 int otg_inst)
239 {
240 struct drm_device *dev = adev->ddev;
241 struct drm_crtc *crtc;
242 struct amdgpu_crtc *amdgpu_crtc;
243
244 /*
245 * following if is check inherited from both functions where this one is
246 * used now. Need to be checked why it could happen.
247 */
248 if (otg_inst == -1) {
249 WARN_ON(1);
250 return adev->mode_info.crtcs[0];
251 }
252
253 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
254 amdgpu_crtc = to_amdgpu_crtc(crtc);
255
256 if (amdgpu_crtc->otg_inst == otg_inst)
257 return amdgpu_crtc;
258 }
259
260 return NULL;
261 }
262
dm_pflip_high_irq(void * interrupt_params)263 static void dm_pflip_high_irq(void *interrupt_params)
264 {
265 struct amdgpu_crtc *amdgpu_crtc;
266 struct common_irq_params *irq_params = interrupt_params;
267 struct amdgpu_device *adev = irq_params->adev;
268 unsigned long flags;
269
270 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
271
272 /* IRQ could occur when in initial stage */
273 /*TODO work and BO cleanup */
274 if (amdgpu_crtc == NULL) {
275 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
276 return;
277 }
278
279 spin_lock_irqsave(&adev->ddev->event_lock, flags);
280
281 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
282 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
283 amdgpu_crtc->pflip_status,
284 AMDGPU_FLIP_SUBMITTED,
285 amdgpu_crtc->crtc_id,
286 amdgpu_crtc);
287 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
288 return;
289 }
290
291
292 /* wakeup usersapce */
293 if (amdgpu_crtc->event) {
294 /* Update to correct count/ts if racing with vblank irq */
295 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
296
297 drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
298
299 /* page flip completed. clean up */
300 amdgpu_crtc->event = NULL;
301
302 } else
303 WARN_ON(1);
304
305 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
306 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
307
308 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
309 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
310
311 drm_crtc_vblank_put(&amdgpu_crtc->base);
312 }
313
dm_crtc_high_irq(void * interrupt_params)314 static void dm_crtc_high_irq(void *interrupt_params)
315 {
316 struct common_irq_params *irq_params = interrupt_params;
317 struct amdgpu_device *adev = irq_params->adev;
318 uint8_t crtc_index = 0;
319 struct amdgpu_crtc *acrtc;
320
321 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
322
323 if (acrtc)
324 crtc_index = acrtc->crtc_id;
325
326 drm_handle_vblank(adev->ddev, crtc_index);
327 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
328 }
329
dm_set_clockgating_state(void * handle,enum amd_clockgating_state state)330 static int dm_set_clockgating_state(void *handle,
331 enum amd_clockgating_state state)
332 {
333 return 0;
334 }
335
dm_set_powergating_state(void * handle,enum amd_powergating_state state)336 static int dm_set_powergating_state(void *handle,
337 enum amd_powergating_state state)
338 {
339 return 0;
340 }
341
342 /* Prototypes of private functions */
343 static int dm_early_init(void* handle);
344
hotplug_notify_work_func(struct work_struct * work)345 static void hotplug_notify_work_func(struct work_struct *work)
346 {
347 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
348 struct drm_device *dev = dm->ddev;
349
350 drm_kms_helper_hotplug_event(dev);
351 }
352
353 /* Allocate memory for FBC compressed data */
amdgpu_dm_fbc_init(struct drm_connector * connector)354 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
355 {
356 struct drm_device *dev = connector->dev;
357 struct amdgpu_device *adev = dev->dev_private;
358 struct dm_comressor_info *compressor = &adev->dm.compressor;
359 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
360 struct drm_display_mode *mode;
361 unsigned long max_size = 0;
362
363 if (adev->dm.dc->fbc_compressor == NULL)
364 return;
365
366 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
367 return;
368
369 if (compressor->bo_ptr)
370 return;
371
372
373 list_for_each_entry(mode, &connector->modes, head) {
374 if (max_size < mode->htotal * mode->vtotal)
375 max_size = mode->htotal * mode->vtotal;
376 }
377
378 if (max_size) {
379 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
380 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
381 (u64 *)&compressor->gpu_addr, &compressor->cpu_addr);
382
383 if (r)
384 DRM_ERROR("DM: Failed to initialize FBC\n");
385 else {
386 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
387 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
388 }
389
390 }
391
392 }
393
394
395 /* Init display KMS
396 *
397 * Returns 0 on success
398 */
amdgpu_dm_init(struct amdgpu_device * adev)399 static int amdgpu_dm_init(struct amdgpu_device *adev)
400 {
401 struct dc_init_data init_data;
402 adev->dm.ddev = adev->ddev;
403 adev->dm.adev = adev;
404
405 /* Zero all the fields */
406 memset(&init_data, 0, sizeof(init_data));
407
408 if(amdgpu_dm_irq_init(adev)) {
409 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
410 goto error;
411 }
412
413 init_data.asic_id.chip_family = adev->family;
414
415 init_data.asic_id.pci_revision_id = adev->rev_id;
416 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
417 init_data.asic_id.chip_id = adev->pdev->device;
418
419 init_data.asic_id.vram_width = adev->gmc.vram_width;
420 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
421 init_data.asic_id.atombios_base_address =
422 adev->mode_info.atom_context->bios;
423
424 init_data.driver = adev;
425
426 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
427
428 if (!adev->dm.cgs_device) {
429 DRM_ERROR("amdgpu: failed to create cgs device.\n");
430 goto error;
431 }
432
433 init_data.cgs_device = adev->dm.cgs_device;
434
435 adev->dm.dal = NULL;
436
437 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
438
439 /*
440 * TODO debug why this doesn't work on Raven
441 */
442 if (adev->flags & AMD_IS_APU &&
443 adev->asic_type >= CHIP_CARRIZO &&
444 adev->asic_type < CHIP_RAVEN)
445 init_data.flags.gpu_vm_support = true;
446
447 /* Display Core create. */
448 adev->dm.dc = dc_create(&init_data);
449
450 if (adev->dm.dc) {
451 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
452 } else {
453 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
454 goto error;
455 }
456
457 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
458
459 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
460 if (!adev->dm.freesync_module) {
461 DRM_ERROR(
462 "amdgpu: failed to initialize freesync_module.\n");
463 } else
464 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
465 adev->dm.freesync_module);
466
467 amdgpu_dm_init_color_mod();
468
469 if (amdgpu_dm_initialize_drm_device(adev)) {
470 DRM_ERROR(
471 "amdgpu: failed to initialize sw for display support.\n");
472 goto error;
473 }
474
475 /* Update the actual used number of crtc */
476 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
477
478 /* TODO: Add_display_info? */
479
480 /* TODO use dynamic cursor width */
481 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
482 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
483
484 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
485 DRM_ERROR(
486 "amdgpu: failed to initialize sw for display support.\n");
487 goto error;
488 }
489
490 DRM_DEBUG_DRIVER("KMS initialized.\n");
491
492 return 0;
493 error:
494 amdgpu_dm_fini(adev);
495
496 return -1;
497 }
498
amdgpu_dm_fini(struct amdgpu_device * adev)499 static void amdgpu_dm_fini(struct amdgpu_device *adev)
500 {
501 amdgpu_dm_destroy_drm_device(&adev->dm);
502 /*
503 * TODO: pageflip, vlank interrupt
504 *
505 * amdgpu_dm_irq_fini(adev);
506 */
507
508 if (adev->dm.cgs_device) {
509 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
510 adev->dm.cgs_device = NULL;
511 }
512 if (adev->dm.freesync_module) {
513 mod_freesync_destroy(adev->dm.freesync_module);
514 adev->dm.freesync_module = NULL;
515 }
516 /* DC Destroy TODO: Replace destroy DAL */
517 if (adev->dm.dc)
518 dc_destroy(&adev->dm.dc);
519 return;
520 }
521
dm_sw_init(void * handle)522 static int dm_sw_init(void *handle)
523 {
524 return 0;
525 }
526
dm_sw_fini(void * handle)527 static int dm_sw_fini(void *handle)
528 {
529 return 0;
530 }
531
detect_mst_link_for_all_connectors(struct drm_device * dev)532 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
533 {
534 struct amdgpu_dm_connector *aconnector;
535 struct drm_connector *connector;
536 int ret = 0;
537
538 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
539
540 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
541 aconnector = to_amdgpu_dm_connector(connector);
542 if (aconnector->dc_link->type == dc_connection_mst_branch &&
543 aconnector->mst_mgr.aux) {
544 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
545 aconnector, aconnector->base.base.id);
546
547 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
548 if (ret < 0) {
549 DRM_ERROR("DM_MST: Failed to start MST\n");
550 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
551 return ret;
552 }
553 }
554 }
555
556 drm_modeset_unlock(&dev->mode_config.connection_mutex);
557 return ret;
558 }
559
dm_late_init(void * handle)560 static int dm_late_init(void *handle)
561 {
562 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
563
564 return detect_mst_link_for_all_connectors(adev->ddev);
565 }
566
s3_handle_mst(struct drm_device * dev,bool suspend)567 static void s3_handle_mst(struct drm_device *dev, bool suspend)
568 {
569 struct amdgpu_dm_connector *aconnector;
570 struct drm_connector *connector;
571 struct drm_dp_mst_topology_mgr *mgr;
572 int ret;
573 bool need_hotplug = false;
574
575 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
576
577 list_for_each_entry(connector, &dev->mode_config.connector_list,
578 head) {
579 aconnector = to_amdgpu_dm_connector(connector);
580 if (aconnector->dc_link->type != dc_connection_mst_branch ||
581 aconnector->mst_port)
582 continue;
583
584 mgr = &aconnector->mst_mgr;
585
586 if (suspend) {
587 drm_dp_mst_topology_mgr_suspend(mgr);
588 } else {
589 ret = drm_dp_mst_topology_mgr_resume(mgr);
590 if (ret < 0) {
591 drm_dp_mst_topology_mgr_set_mst(mgr, false);
592 need_hotplug = true;
593 }
594 }
595 }
596
597 drm_modeset_unlock(&dev->mode_config.connection_mutex);
598
599 if (need_hotplug)
600 drm_kms_helper_hotplug_event(dev);
601 }
602
dm_hw_init(void * handle)603 static int dm_hw_init(void *handle)
604 {
605 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
606 /* Create DAL display manager */
607 amdgpu_dm_init(adev);
608 amdgpu_dm_hpd_init(adev);
609
610 return 0;
611 }
612
dm_hw_fini(void * handle)613 static int dm_hw_fini(void *handle)
614 {
615 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
616
617 amdgpu_dm_hpd_fini(adev);
618
619 amdgpu_dm_irq_fini(adev);
620 amdgpu_dm_fini(adev);
621 return 0;
622 }
623
dm_suspend(void * handle)624 static int dm_suspend(void *handle)
625 {
626 struct amdgpu_device *adev = handle;
627 struct amdgpu_display_manager *dm = &adev->dm;
628 int ret = 0;
629
630 WARN_ON(adev->dm.cached_state);
631 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
632
633 s3_handle_mst(adev->ddev, true);
634
635 amdgpu_dm_irq_suspend(adev);
636
637
638 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
639
640 return ret;
641 }
642
643 static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state * state,struct drm_crtc * crtc)644 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
645 struct drm_crtc *crtc)
646 {
647 uint32_t i;
648 struct drm_connector_state *new_con_state;
649 struct drm_connector *connector;
650 struct drm_crtc *crtc_from_state;
651
652 for_each_new_connector_in_state(state, connector, new_con_state, i) {
653 crtc_from_state = new_con_state->crtc;
654
655 if (crtc_from_state == crtc)
656 return to_amdgpu_dm_connector(connector);
657 }
658
659 return NULL;
660 }
661
emulated_link_detect(struct dc_link * link)662 static void emulated_link_detect(struct dc_link *link)
663 {
664 struct dc_sink_init_data sink_init_data = { 0 };
665 struct display_sink_capability sink_caps = { 0 };
666 enum dc_edid_status edid_status;
667 struct dc_context *dc_ctx = link->ctx;
668 struct dc_sink *sink = NULL;
669 struct dc_sink *prev_sink = NULL;
670
671 link->type = dc_connection_none;
672 prev_sink = link->local_sink;
673
674 if (prev_sink)
675 dc_sink_release(prev_sink);
676
677 switch (link->connector_signal) {
678 case SIGNAL_TYPE_HDMI_TYPE_A: {
679 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
680 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
681 break;
682 }
683
684 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
685 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
686 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
687 break;
688 }
689
690 case SIGNAL_TYPE_DVI_DUAL_LINK: {
691 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
692 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
693 break;
694 }
695
696 case SIGNAL_TYPE_LVDS: {
697 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
698 sink_caps.signal = SIGNAL_TYPE_LVDS;
699 break;
700 }
701
702 case SIGNAL_TYPE_EDP: {
703 sink_caps.transaction_type =
704 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
705 sink_caps.signal = SIGNAL_TYPE_EDP;
706 break;
707 }
708
709 case SIGNAL_TYPE_DISPLAY_PORT: {
710 sink_caps.transaction_type =
711 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
712 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
713 break;
714 }
715
716 default:
717 DC_ERROR("Invalid connector type! signal:%d\n",
718 link->connector_signal);
719 return;
720 }
721
722 sink_init_data.link = link;
723 sink_init_data.sink_signal = sink_caps.signal;
724
725 sink = dc_sink_create(&sink_init_data);
726 if (!sink) {
727 DC_ERROR("Failed to create sink!\n");
728 return;
729 }
730
731 link->local_sink = sink;
732
733 edid_status = dm_helpers_read_local_edid(
734 link->ctx,
735 link,
736 sink);
737
738 if (edid_status != EDID_OK)
739 DC_ERROR("Failed to read EDID");
740
741 }
742
dm_resume(void * handle)743 static int dm_resume(void *handle)
744 {
745 struct amdgpu_device *adev = handle;
746 struct drm_device *ddev = adev->ddev;
747 struct amdgpu_display_manager *dm = &adev->dm;
748 struct amdgpu_dm_connector *aconnector;
749 struct drm_connector *connector;
750 struct drm_crtc *crtc;
751 struct drm_crtc_state *new_crtc_state;
752 struct dm_crtc_state *dm_new_crtc_state;
753 struct drm_plane *plane;
754 struct drm_plane_state *new_plane_state;
755 struct dm_plane_state *dm_new_plane_state;
756 enum dc_connection_type new_connection_type = dc_connection_none;
757 int i;
758
759 /* power on hardware */
760 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
761
762 /* program HPD filter */
763 dc_resume(dm->dc);
764
765 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
766 s3_handle_mst(ddev, false);
767
768 /*
769 * early enable HPD Rx IRQ, should be done before set mode as short
770 * pulse interrupts are used for MST
771 */
772 amdgpu_dm_irq_resume_early(adev);
773
774 /* Do detection*/
775 list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
776 aconnector = to_amdgpu_dm_connector(connector);
777
778 /*
779 * this is the case when traversing through already created
780 * MST connectors, should be skipped
781 */
782 if (aconnector->mst_port)
783 continue;
784
785 mutex_lock(&aconnector->hpd_lock);
786 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
787 DRM_ERROR("KMS: Failed to detect connector\n");
788
789 if (aconnector->base.force && new_connection_type == dc_connection_none)
790 emulated_link_detect(aconnector->dc_link);
791 else
792 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
793
794 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
795 aconnector->fake_enable = false;
796
797 aconnector->dc_sink = NULL;
798 amdgpu_dm_update_connector_after_detect(aconnector);
799 mutex_unlock(&aconnector->hpd_lock);
800 }
801
802 /* Force mode set in atomic comit */
803 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
804 new_crtc_state->active_changed = true;
805
806 /*
807 * atomic_check is expected to create the dc states. We need to release
808 * them here, since they were duplicated as part of the suspend
809 * procedure.
810 */
811 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
812 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
813 if (dm_new_crtc_state->stream) {
814 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
815 dc_stream_release(dm_new_crtc_state->stream);
816 dm_new_crtc_state->stream = NULL;
817 }
818 }
819
820 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
821 dm_new_plane_state = to_dm_plane_state(new_plane_state);
822 if (dm_new_plane_state->dc_state) {
823 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
824 dc_plane_state_release(dm_new_plane_state->dc_state);
825 dm_new_plane_state->dc_state = NULL;
826 }
827 }
828
829 drm_atomic_helper_resume(ddev, dm->cached_state);
830
831 dm->cached_state = NULL;
832
833 amdgpu_dm_irq_resume_late(adev);
834
835 return 0;
836 }
837
838 static const struct amd_ip_funcs amdgpu_dm_funcs = {
839 .name = "dm",
840 .early_init = dm_early_init,
841 .late_init = dm_late_init,
842 .sw_init = dm_sw_init,
843 .sw_fini = dm_sw_fini,
844 .hw_init = dm_hw_init,
845 .hw_fini = dm_hw_fini,
846 .suspend = dm_suspend,
847 .resume = dm_resume,
848 .is_idle = dm_is_idle,
849 .wait_for_idle = dm_wait_for_idle,
850 .check_soft_reset = dm_check_soft_reset,
851 .soft_reset = dm_soft_reset,
852 .set_clockgating_state = dm_set_clockgating_state,
853 .set_powergating_state = dm_set_powergating_state,
854 };
855
856 const struct amdgpu_ip_block_version dm_ip_block =
857 {
858 .type = AMD_IP_BLOCK_TYPE_DCE,
859 .major = 1,
860 .minor = 0,
861 .rev = 0,
862 .funcs = &amdgpu_dm_funcs,
863 };
864
865
866 static struct drm_atomic_state *
dm_atomic_state_alloc(struct drm_device * dev)867 dm_atomic_state_alloc(struct drm_device *dev)
868 {
869 struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
870
871 if (!state)
872 return NULL;
873
874 if (drm_atomic_state_init(dev, &state->base) < 0)
875 goto fail;
876
877 return &state->base;
878
879 fail:
880 kfree(state);
881 return NULL;
882 }
883
884 static void
dm_atomic_state_clear(struct drm_atomic_state * state)885 dm_atomic_state_clear(struct drm_atomic_state *state)
886 {
887 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
888
889 if (dm_state->context) {
890 dc_release_state(dm_state->context);
891 dm_state->context = NULL;
892 }
893
894 drm_atomic_state_default_clear(state);
895 }
896
897 static void
dm_atomic_state_alloc_free(struct drm_atomic_state * state)898 dm_atomic_state_alloc_free(struct drm_atomic_state *state)
899 {
900 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
901 drm_atomic_state_default_release(state);
902 kfree(dm_state);
903 }
904
905 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
906 .fb_create = amdgpu_display_user_framebuffer_create,
907 .output_poll_changed = drm_fb_helper_output_poll_changed,
908 .atomic_check = amdgpu_dm_atomic_check,
909 .atomic_commit = amdgpu_dm_atomic_commit,
910 .atomic_state_alloc = dm_atomic_state_alloc,
911 .atomic_state_clear = dm_atomic_state_clear,
912 .atomic_state_free = dm_atomic_state_alloc_free
913 };
914
915 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
916 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
917 };
918
919 static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector * aconnector)920 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
921 {
922 struct drm_connector *connector = &aconnector->base;
923 struct drm_device *dev = connector->dev;
924 struct dc_sink *sink;
925
926 /* MST handled by drm_mst framework */
927 if (aconnector->mst_mgr.mst_state == true)
928 return;
929
930
931 sink = aconnector->dc_link->local_sink;
932
933 /* Edid mgmt connector gets first update only in mode_valid hook and then
934 * the connector sink is set to either fake or physical sink depends on link status.
935 * don't do it here if u are during boot
936 */
937 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
938 && aconnector->dc_em_sink) {
939
940 /* For S3 resume with headless use eml_sink to fake stream
941 * because on resume connecotr->sink is set ti NULL
942 */
943 mutex_lock(&dev->mode_config.mutex);
944
945 if (sink) {
946 if (aconnector->dc_sink) {
947 amdgpu_dm_remove_sink_from_freesync_module(
948 connector);
949 /* retain and release bellow are used for
950 * bump up refcount for sink because the link don't point
951 * to it anymore after disconnect so on next crtc to connector
952 * reshuffle by UMD we will get into unwanted dc_sink release
953 */
954 if (aconnector->dc_sink != aconnector->dc_em_sink)
955 dc_sink_release(aconnector->dc_sink);
956 }
957 aconnector->dc_sink = sink;
958 amdgpu_dm_add_sink_to_freesync_module(
959 connector, aconnector->edid);
960 } else {
961 amdgpu_dm_remove_sink_from_freesync_module(connector);
962 if (!aconnector->dc_sink)
963 aconnector->dc_sink = aconnector->dc_em_sink;
964 else if (aconnector->dc_sink != aconnector->dc_em_sink)
965 dc_sink_retain(aconnector->dc_sink);
966 }
967
968 mutex_unlock(&dev->mode_config.mutex);
969 return;
970 }
971
972 /*
973 * TODO: temporary guard to look for proper fix
974 * if this sink is MST sink, we should not do anything
975 */
976 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
977 return;
978
979 if (aconnector->dc_sink == sink) {
980 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
981 * Do nothing!! */
982 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
983 aconnector->connector_id);
984 return;
985 }
986
987 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
988 aconnector->connector_id, aconnector->dc_sink, sink);
989
990 mutex_lock(&dev->mode_config.mutex);
991
992 /* 1. Update status of the drm connector
993 * 2. Send an event and let userspace tell us what to do */
994 if (sink) {
995 /* TODO: check if we still need the S3 mode update workaround.
996 * If yes, put it here. */
997 if (aconnector->dc_sink)
998 amdgpu_dm_remove_sink_from_freesync_module(
999 connector);
1000
1001 aconnector->dc_sink = sink;
1002 if (sink->dc_edid.length == 0) {
1003 aconnector->edid = NULL;
1004 } else {
1005 aconnector->edid =
1006 (struct edid *) sink->dc_edid.raw_edid;
1007
1008
1009 drm_connector_update_edid_property(connector,
1010 aconnector->edid);
1011 }
1012 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
1013
1014 } else {
1015 amdgpu_dm_remove_sink_from_freesync_module(connector);
1016 drm_connector_update_edid_property(connector, NULL);
1017 aconnector->num_modes = 0;
1018 aconnector->dc_sink = NULL;
1019 aconnector->edid = NULL;
1020 }
1021
1022 mutex_unlock(&dev->mode_config.mutex);
1023 }
1024
handle_hpd_irq(void * param)1025 static void handle_hpd_irq(void *param)
1026 {
1027 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1028 struct drm_connector *connector = &aconnector->base;
1029 struct drm_device *dev = connector->dev;
1030 enum dc_connection_type new_connection_type = dc_connection_none;
1031
1032 /* In case of failure or MST no need to update connector status or notify the OS
1033 * since (for MST case) MST does this in it's own context.
1034 */
1035 mutex_lock(&aconnector->hpd_lock);
1036
1037 if (aconnector->fake_enable)
1038 aconnector->fake_enable = false;
1039
1040 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1041 DRM_ERROR("KMS: Failed to detect connector\n");
1042
1043 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1044 emulated_link_detect(aconnector->dc_link);
1045
1046
1047 drm_modeset_lock_all(dev);
1048 dm_restore_drm_connector_state(dev, connector);
1049 drm_modeset_unlock_all(dev);
1050
1051 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1052 drm_kms_helper_hotplug_event(dev);
1053
1054 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
1055 amdgpu_dm_update_connector_after_detect(aconnector);
1056
1057
1058 drm_modeset_lock_all(dev);
1059 dm_restore_drm_connector_state(dev, connector);
1060 drm_modeset_unlock_all(dev);
1061
1062 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1063 drm_kms_helper_hotplug_event(dev);
1064 }
1065 mutex_unlock(&aconnector->hpd_lock);
1066
1067 }
1068
dm_handle_hpd_rx_irq(struct amdgpu_dm_connector * aconnector)1069 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
1070 {
1071 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
1072 uint8_t dret;
1073 bool new_irq_handled = false;
1074 int dpcd_addr;
1075 int dpcd_bytes_to_read;
1076
1077 const int max_process_count = 30;
1078 int process_count = 0;
1079
1080 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
1081
1082 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
1083 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
1084 /* DPCD 0x200 - 0x201 for downstream IRQ */
1085 dpcd_addr = DP_SINK_COUNT;
1086 } else {
1087 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
1088 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
1089 dpcd_addr = DP_SINK_COUNT_ESI;
1090 }
1091
1092 dret = drm_dp_dpcd_read(
1093 &aconnector->dm_dp_aux.aux,
1094 dpcd_addr,
1095 esi,
1096 dpcd_bytes_to_read);
1097
1098 while (dret == dpcd_bytes_to_read &&
1099 process_count < max_process_count) {
1100 uint8_t retry;
1101 dret = 0;
1102
1103 process_count++;
1104
1105 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
1106 /* handle HPD short pulse irq */
1107 if (aconnector->mst_mgr.mst_state)
1108 drm_dp_mst_hpd_irq(
1109 &aconnector->mst_mgr,
1110 esi,
1111 &new_irq_handled);
1112
1113 if (new_irq_handled) {
1114 /* ACK at DPCD to notify down stream */
1115 const int ack_dpcd_bytes_to_write =
1116 dpcd_bytes_to_read - 1;
1117
1118 for (retry = 0; retry < 3; retry++) {
1119 uint8_t wret;
1120
1121 wret = drm_dp_dpcd_write(
1122 &aconnector->dm_dp_aux.aux,
1123 dpcd_addr + 1,
1124 &esi[1],
1125 ack_dpcd_bytes_to_write);
1126 if (wret == ack_dpcd_bytes_to_write)
1127 break;
1128 }
1129
1130 /* check if there is new irq to be handle */
1131 dret = drm_dp_dpcd_read(
1132 &aconnector->dm_dp_aux.aux,
1133 dpcd_addr,
1134 esi,
1135 dpcd_bytes_to_read);
1136
1137 new_irq_handled = false;
1138 } else {
1139 break;
1140 }
1141 }
1142
1143 if (process_count == max_process_count)
1144 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1145 }
1146
handle_hpd_rx_irq(void * param)1147 static void handle_hpd_rx_irq(void *param)
1148 {
1149 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1150 struct drm_connector *connector = &aconnector->base;
1151 struct drm_device *dev = connector->dev;
1152 struct dc_link *dc_link = aconnector->dc_link;
1153 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1154 enum dc_connection_type new_connection_type = dc_connection_none;
1155
1156 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1157 * conflict, after implement i2c helper, this mutex should be
1158 * retired.
1159 */
1160 if (dc_link->type != dc_connection_mst_branch)
1161 mutex_lock(&aconnector->hpd_lock);
1162
1163 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1164 !is_mst_root_connector) {
1165 /* Downstream Port status changed. */
1166 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1167 DRM_ERROR("KMS: Failed to detect connector\n");
1168
1169 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1170 emulated_link_detect(dc_link);
1171
1172 if (aconnector->fake_enable)
1173 aconnector->fake_enable = false;
1174
1175 amdgpu_dm_update_connector_after_detect(aconnector);
1176
1177
1178 drm_modeset_lock_all(dev);
1179 dm_restore_drm_connector_state(dev, connector);
1180 drm_modeset_unlock_all(dev);
1181
1182 drm_kms_helper_hotplug_event(dev);
1183 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1184
1185 if (aconnector->fake_enable)
1186 aconnector->fake_enable = false;
1187
1188 amdgpu_dm_update_connector_after_detect(aconnector);
1189
1190
1191 drm_modeset_lock_all(dev);
1192 dm_restore_drm_connector_state(dev, connector);
1193 drm_modeset_unlock_all(dev);
1194
1195 drm_kms_helper_hotplug_event(dev);
1196 }
1197 }
1198 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1199 (dc_link->type == dc_connection_mst_branch))
1200 dm_handle_hpd_rx_irq(aconnector);
1201
1202 if (dc_link->type != dc_connection_mst_branch)
1203 mutex_unlock(&aconnector->hpd_lock);
1204 }
1205
register_hpd_handlers(struct amdgpu_device * adev)1206 static void register_hpd_handlers(struct amdgpu_device *adev)
1207 {
1208 struct drm_device *dev = adev->ddev;
1209 struct drm_connector *connector;
1210 struct amdgpu_dm_connector *aconnector;
1211 const struct dc_link *dc_link;
1212 struct dc_interrupt_params int_params = {0};
1213
1214 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1215 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1216
1217 list_for_each_entry(connector,
1218 &dev->mode_config.connector_list, head) {
1219
1220 aconnector = to_amdgpu_dm_connector(connector);
1221 dc_link = aconnector->dc_link;
1222
1223 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1224 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1225 int_params.irq_source = dc_link->irq_source_hpd;
1226
1227 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1228 handle_hpd_irq,
1229 (void *) aconnector);
1230 }
1231
1232 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1233
1234 /* Also register for DP short pulse (hpd_rx). */
1235 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1236 int_params.irq_source = dc_link->irq_source_hpd_rx;
1237
1238 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1239 handle_hpd_rx_irq,
1240 (void *) aconnector);
1241 }
1242 }
1243 }
1244
1245 /* Register IRQ sources and initialize IRQ callbacks */
dce110_register_irq_handlers(struct amdgpu_device * adev)1246 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1247 {
1248 struct dc *dc = adev->dm.dc;
1249 struct common_irq_params *c_irq_params;
1250 struct dc_interrupt_params int_params = {0};
1251 int r;
1252 int i;
1253 unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
1254
1255 if (adev->asic_type == CHIP_VEGA10 ||
1256 adev->asic_type == CHIP_VEGA12 ||
1257 adev->asic_type == CHIP_VEGA20 ||
1258 adev->asic_type == CHIP_RAVEN)
1259 client_id = SOC15_IH_CLIENTID_DCE;
1260
1261 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1262 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1263
1264 /* Actions of amdgpu_irq_add_id():
1265 * 1. Register a set() function with base driver.
1266 * Base driver will call set() function to enable/disable an
1267 * interrupt in DC hardware.
1268 * 2. Register amdgpu_dm_irq_handler().
1269 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1270 * coming from DC hardware.
1271 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1272 * for acknowledging and handling. */
1273
1274 /* Use VBLANK interrupt */
1275 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1276 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1277 if (r) {
1278 DRM_ERROR("Failed to add crtc irq id!\n");
1279 return r;
1280 }
1281
1282 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1283 int_params.irq_source =
1284 dc_interrupt_to_irq_source(dc, i, 0);
1285
1286 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1287
1288 c_irq_params->adev = adev;
1289 c_irq_params->irq_src = int_params.irq_source;
1290
1291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1292 dm_crtc_high_irq, c_irq_params);
1293 }
1294
1295 /* Use GRPH_PFLIP interrupt */
1296 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1297 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1298 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1299 if (r) {
1300 DRM_ERROR("Failed to add page flip irq id!\n");
1301 return r;
1302 }
1303
1304 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1305 int_params.irq_source =
1306 dc_interrupt_to_irq_source(dc, i, 0);
1307
1308 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1309
1310 c_irq_params->adev = adev;
1311 c_irq_params->irq_src = int_params.irq_source;
1312
1313 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1314 dm_pflip_high_irq, c_irq_params);
1315
1316 }
1317
1318 /* HPD */
1319 r = amdgpu_irq_add_id(adev, client_id,
1320 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1321 if (r) {
1322 DRM_ERROR("Failed to add hpd irq id!\n");
1323 return r;
1324 }
1325
1326 register_hpd_handlers(adev);
1327
1328 return 0;
1329 }
1330
1331 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1332 /* Register IRQ sources and initialize IRQ callbacks */
dcn10_register_irq_handlers(struct amdgpu_device * adev)1333 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1334 {
1335 struct dc *dc = adev->dm.dc;
1336 struct common_irq_params *c_irq_params;
1337 struct dc_interrupt_params int_params = {0};
1338 int r;
1339 int i;
1340
1341 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1342 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1343
1344 /* Actions of amdgpu_irq_add_id():
1345 * 1. Register a set() function with base driver.
1346 * Base driver will call set() function to enable/disable an
1347 * interrupt in DC hardware.
1348 * 2. Register amdgpu_dm_irq_handler().
1349 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1350 * coming from DC hardware.
1351 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1352 * for acknowledging and handling.
1353 * */
1354
1355 /* Use VSTARTUP interrupt */
1356 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1357 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1358 i++) {
1359 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1360
1361 if (r) {
1362 DRM_ERROR("Failed to add crtc irq id!\n");
1363 return r;
1364 }
1365
1366 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1367 int_params.irq_source =
1368 dc_interrupt_to_irq_source(dc, i, 0);
1369
1370 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1371
1372 c_irq_params->adev = adev;
1373 c_irq_params->irq_src = int_params.irq_source;
1374
1375 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1376 dm_crtc_high_irq, c_irq_params);
1377 }
1378
1379 /* Use GRPH_PFLIP interrupt */
1380 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1381 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1382 i++) {
1383 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1384 if (r) {
1385 DRM_ERROR("Failed to add page flip irq id!\n");
1386 return r;
1387 }
1388
1389 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1390 int_params.irq_source =
1391 dc_interrupt_to_irq_source(dc, i, 0);
1392
1393 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1394
1395 c_irq_params->adev = adev;
1396 c_irq_params->irq_src = int_params.irq_source;
1397
1398 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1399 dm_pflip_high_irq, c_irq_params);
1400
1401 }
1402
1403 /* HPD */
1404 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1405 &adev->hpd_irq);
1406 if (r) {
1407 DRM_ERROR("Failed to add hpd irq id!\n");
1408 return r;
1409 }
1410
1411 register_hpd_handlers(adev);
1412
1413 return 0;
1414 }
1415 #endif
1416
amdgpu_dm_mode_config_init(struct amdgpu_device * adev)1417 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1418 {
1419 int r;
1420
1421 adev->mode_info.mode_config_initialized = true;
1422
1423 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1424 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1425
1426 adev->ddev->mode_config.max_width = 16384;
1427 adev->ddev->mode_config.max_height = 16384;
1428
1429 adev->ddev->mode_config.preferred_depth = 24;
1430 adev->ddev->mode_config.prefer_shadow = 1;
1431 /* indicate support of immediate flip */
1432 adev->ddev->mode_config.async_page_flip = true;
1433
1434 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1435
1436 r = amdgpu_display_modeset_create_props(adev);
1437 if (r)
1438 return r;
1439
1440 return 0;
1441 }
1442
1443 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1444 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1445
1446 #if 0
1447 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1448 {
1449 struct amdgpu_display_manager *dm = bl_get_data(bd);
1450
1451 if (dc_link_set_backlight_level(dm->backlight_link,
1452 bd->props.brightness, 0, 0))
1453 return 0;
1454 else
1455 return 1;
1456 }
1457
1458 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1459 {
1460 struct amdgpu_display_manager *dm = bl_get_data(bd);
1461 int ret = dc_link_get_backlight_level(dm->backlight_link);
1462
1463 if (ret == DC_ERROR_UNEXPECTED)
1464 return bd->props.brightness;
1465 return ret;
1466 }
1467
1468 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1469 .options = BL_CORE_SUSPENDRESUME,
1470 .get_brightness = amdgpu_dm_backlight_get_brightness,
1471 .update_status = amdgpu_dm_backlight_update_status,
1472 };
1473 #endif
1474
1475 static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager * dm)1476 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1477 {
1478 #if 0
1479 char bl_name[16];
1480 struct backlight_properties props = { 0 };
1481
1482 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1483 props.brightness = AMDGPU_MAX_BL_LEVEL;
1484 props.type = BACKLIGHT_RAW;
1485
1486 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1487 dm->adev->ddev->primary->index);
1488
1489 dm->backlight_dev = backlight_device_register(bl_name,
1490 dm->adev->ddev->dev,
1491 dm,
1492 &amdgpu_dm_backlight_ops,
1493 &props);
1494
1495 if (IS_ERR(dm->backlight_dev))
1496 DRM_ERROR("DM: Backlight registration failed!\n");
1497 else
1498 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1499 #endif
1500 }
1501
1502 #endif
1503
initialize_plane(struct amdgpu_display_manager * dm,struct amdgpu_mode_info * mode_info,int plane_id)1504 static int initialize_plane(struct amdgpu_display_manager *dm,
1505 struct amdgpu_mode_info *mode_info,
1506 int plane_id)
1507 {
1508 struct amdgpu_plane *plane;
1509 unsigned long possible_crtcs;
1510 int ret = 0;
1511
1512 plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
1513 mode_info->planes[plane_id] = plane;
1514
1515 if (!plane) {
1516 DRM_ERROR("KMS: Failed to allocate plane\n");
1517 return -ENOMEM;
1518 }
1519 plane->base.type = mode_info->plane_type[plane_id];
1520
1521 /*
1522 * HACK: IGT tests expect that each plane can only have one
1523 * one possible CRTC. For now, set one CRTC for each
1524 * plane that is not an underlay, but still allow multiple
1525 * CRTCs for underlay planes.
1526 */
1527 possible_crtcs = 1 << plane_id;
1528 if (plane_id >= dm->dc->caps.max_streams)
1529 possible_crtcs = 0xff;
1530
1531 ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
1532
1533 if (ret) {
1534 DRM_ERROR("KMS: Failed to initialize plane\n");
1535 return ret;
1536 }
1537
1538 return ret;
1539 }
1540
1541
register_backlight_device(struct amdgpu_display_manager * dm,struct dc_link * link)1542 static void register_backlight_device(struct amdgpu_display_manager *dm,
1543 struct dc_link *link)
1544 {
1545 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1546 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1547
1548 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
1549 link->type != dc_connection_none) {
1550 /* Event if registration failed, we should continue with
1551 * DM initialization because not having a backlight control
1552 * is better then a black screen.
1553 */
1554 amdgpu_dm_register_backlight_device(dm);
1555
1556 if (dm->backlight_dev)
1557 dm->backlight_link = link;
1558 }
1559 #endif
1560 }
1561
1562
1563 /* In this architecture, the association
1564 * connector -> encoder -> crtc
1565 * id not really requried. The crtc and connector will hold the
1566 * display_index as an abstraction to use with DAL component
1567 *
1568 * Returns 0 on success
1569 */
amdgpu_dm_initialize_drm_device(struct amdgpu_device * adev)1570 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1571 {
1572 struct amdgpu_display_manager *dm = &adev->dm;
1573 int32_t i;
1574 struct amdgpu_dm_connector *aconnector = NULL;
1575 struct amdgpu_encoder *aencoder = NULL;
1576 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1577 uint32_t link_cnt;
1578 int32_t total_overlay_planes, total_primary_planes;
1579 enum dc_connection_type new_connection_type = dc_connection_none;
1580
1581 link_cnt = dm->dc->caps.max_links;
1582 if (amdgpu_dm_mode_config_init(dm->adev)) {
1583 DRM_ERROR("DM: Failed to initialize mode config\n");
1584 return -1;
1585 }
1586
1587 /* Identify the number of planes to be initialized */
1588 total_overlay_planes = dm->dc->caps.max_slave_planes;
1589 total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
1590
1591 /* First initialize overlay planes, index starting after primary planes */
1592 for (i = (total_overlay_planes - 1); i >= 0; i--) {
1593 if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
1594 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
1595 goto fail;
1596 }
1597 }
1598
1599 /* Initialize primary planes */
1600 for (i = (total_primary_planes - 1); i >= 0; i--) {
1601 if (initialize_plane(dm, mode_info, i)) {
1602 DRM_ERROR("KMS: Failed to initialize primary plane\n");
1603 goto fail;
1604 }
1605 }
1606
1607 for (i = 0; i < dm->dc->caps.max_streams; i++)
1608 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1609 DRM_ERROR("KMS: Failed to initialize crtc\n");
1610 goto fail;
1611 }
1612
1613 dm->display_indexes_num = dm->dc->caps.max_streams;
1614
1615 /* loops over all connectors on the board */
1616 for (i = 0; i < link_cnt; i++) {
1617 struct dc_link *link = NULL;
1618
1619 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1620 DRM_ERROR(
1621 "KMS: Cannot support more than %d display indexes\n",
1622 AMDGPU_DM_MAX_DISPLAY_INDEX);
1623 continue;
1624 }
1625
1626 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1627 if (!aconnector)
1628 goto fail;
1629
1630 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1631 if (!aencoder)
1632 goto fail;
1633
1634 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1635 DRM_ERROR("KMS: Failed to initialize encoder\n");
1636 goto fail;
1637 }
1638
1639 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1640 DRM_ERROR("KMS: Failed to initialize connector\n");
1641 goto fail;
1642 }
1643
1644 link = dc_get_link_at_index(dm->dc, i);
1645
1646 if (!dc_link_detect_sink(link, &new_connection_type))
1647 DRM_ERROR("KMS: Failed to detect connector\n");
1648
1649 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1650 emulated_link_detect(link);
1651 amdgpu_dm_update_connector_after_detect(aconnector);
1652
1653 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1654 amdgpu_dm_update_connector_after_detect(aconnector);
1655 register_backlight_device(dm, link);
1656 }
1657
1658
1659 }
1660
1661 /* Software is initialized. Now we can register interrupt handlers. */
1662 switch (adev->asic_type) {
1663 case CHIP_BONAIRE:
1664 case CHIP_HAWAII:
1665 case CHIP_KAVERI:
1666 case CHIP_KABINI:
1667 case CHIP_MULLINS:
1668 case CHIP_TONGA:
1669 case CHIP_FIJI:
1670 case CHIP_CARRIZO:
1671 case CHIP_STONEY:
1672 case CHIP_POLARIS11:
1673 case CHIP_POLARIS10:
1674 case CHIP_POLARIS12:
1675 case CHIP_VEGAM:
1676 case CHIP_VEGA10:
1677 case CHIP_VEGA12:
1678 case CHIP_VEGA20:
1679 if (dce110_register_irq_handlers(dm->adev)) {
1680 DRM_ERROR("DM: Failed to initialize IRQ\n");
1681 goto fail;
1682 }
1683 break;
1684 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1685 case CHIP_RAVEN:
1686 if (dcn10_register_irq_handlers(dm->adev)) {
1687 DRM_ERROR("DM: Failed to initialize IRQ\n");
1688 goto fail;
1689 }
1690 break;
1691 #endif
1692 default:
1693 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1694 goto fail;
1695 }
1696
1697 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1698 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1699
1700 return 0;
1701 fail:
1702 kfree(aencoder);
1703 kfree(aconnector);
1704 for (i = 0; i < dm->dc->caps.max_planes; i++)
1705 kfree(mode_info->planes[i]);
1706 return -1;
1707 }
1708
amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager * dm)1709 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1710 {
1711 drm_mode_config_cleanup(dm->ddev);
1712 return;
1713 }
1714
1715 /******************************************************************************
1716 * amdgpu_display_funcs functions
1717 *****************************************************************************/
1718
1719 /**
1720 * dm_bandwidth_update - program display watermarks
1721 *
1722 * @adev: amdgpu_device pointer
1723 *
1724 * Calculate and program the display watermarks and line buffer allocation.
1725 */
dm_bandwidth_update(struct amdgpu_device * adev)1726 static void dm_bandwidth_update(struct amdgpu_device *adev)
1727 {
1728 /* TODO: implement later */
1729 }
1730
amdgpu_notify_freesync(struct drm_device * dev,void * data,struct drm_file * filp)1731 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1732 struct drm_file *filp)
1733 {
1734 struct mod_freesync_params freesync_params;
1735 uint8_t num_streams;
1736 uint8_t i;
1737
1738 struct amdgpu_device *adev = dev->dev_private;
1739 int r = 0;
1740
1741 /* Get freesync enable flag from DRM */
1742
1743 num_streams = dc_get_current_stream_count(adev->dm.dc);
1744
1745 for (i = 0; i < num_streams; i++) {
1746 struct dc_stream_state *stream;
1747 stream = dc_get_stream_at_index(adev->dm.dc, i);
1748
1749 mod_freesync_update_state(adev->dm.freesync_module,
1750 &stream, 1, &freesync_params);
1751 }
1752
1753 return r;
1754 }
1755
1756 static const struct amdgpu_display_funcs dm_display_funcs = {
1757 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
1758 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
1759 .backlight_set_level = NULL, /* never called for DC */
1760 .backlight_get_level = NULL, /* never called for DC */
1761 .hpd_sense = NULL,/* called unconditionally */
1762 .hpd_set_polarity = NULL, /* called unconditionally */
1763 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
1764 .page_flip_get_scanoutpos =
1765 dm_crtc_get_scanoutpos,/* called unconditionally */
1766 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
1767 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
1768 .notify_freesync = amdgpu_notify_freesync,
1769
1770 };
1771
1772 #if defined(CONFIG_DEBUG_KERNEL_DC)
1773
s3_debug_store(struct device * device,struct device_attribute * attr,const char * buf,size_t count)1774 static ssize_t s3_debug_store(struct device *device,
1775 struct device_attribute *attr,
1776 const char *buf,
1777 size_t count)
1778 {
1779 int ret;
1780 int s3_state;
1781 struct pci_dev *pdev = to_pci_dev(device);
1782 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1783 struct amdgpu_device *adev = drm_dev->dev_private;
1784
1785 ret = kstrtoint(buf, 0, &s3_state);
1786
1787 if (ret == 0) {
1788 if (s3_state) {
1789 dm_resume(adev);
1790 drm_kms_helper_hotplug_event(adev->ddev);
1791 } else
1792 dm_suspend(adev);
1793 }
1794
1795 return ret == 0 ? count : 0;
1796 }
1797
1798 DEVICE_ATTR_WO(s3_debug);
1799
1800 #endif
1801
dm_early_init(void * handle)1802 static int dm_early_init(void *handle)
1803 {
1804 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1805
1806 switch (adev->asic_type) {
1807 case CHIP_BONAIRE:
1808 case CHIP_HAWAII:
1809 adev->mode_info.num_crtc = 6;
1810 adev->mode_info.num_hpd = 6;
1811 adev->mode_info.num_dig = 6;
1812 adev->mode_info.plane_type = dm_plane_type_default;
1813 break;
1814 case CHIP_KAVERI:
1815 adev->mode_info.num_crtc = 4;
1816 adev->mode_info.num_hpd = 6;
1817 adev->mode_info.num_dig = 7;
1818 adev->mode_info.plane_type = dm_plane_type_default;
1819 break;
1820 case CHIP_KABINI:
1821 case CHIP_MULLINS:
1822 adev->mode_info.num_crtc = 2;
1823 adev->mode_info.num_hpd = 6;
1824 adev->mode_info.num_dig = 6;
1825 adev->mode_info.plane_type = dm_plane_type_default;
1826 break;
1827 case CHIP_FIJI:
1828 case CHIP_TONGA:
1829 adev->mode_info.num_crtc = 6;
1830 adev->mode_info.num_hpd = 6;
1831 adev->mode_info.num_dig = 7;
1832 adev->mode_info.plane_type = dm_plane_type_default;
1833 break;
1834 case CHIP_CARRIZO:
1835 adev->mode_info.num_crtc = 3;
1836 adev->mode_info.num_hpd = 6;
1837 adev->mode_info.num_dig = 9;
1838 adev->mode_info.plane_type = dm_plane_type_carizzo;
1839 break;
1840 case CHIP_STONEY:
1841 adev->mode_info.num_crtc = 2;
1842 adev->mode_info.num_hpd = 6;
1843 adev->mode_info.num_dig = 9;
1844 adev->mode_info.plane_type = dm_plane_type_stoney;
1845 break;
1846 case CHIP_POLARIS11:
1847 case CHIP_POLARIS12:
1848 adev->mode_info.num_crtc = 5;
1849 adev->mode_info.num_hpd = 5;
1850 adev->mode_info.num_dig = 5;
1851 adev->mode_info.plane_type = dm_plane_type_default;
1852 break;
1853 case CHIP_POLARIS10:
1854 case CHIP_VEGAM:
1855 adev->mode_info.num_crtc = 6;
1856 adev->mode_info.num_hpd = 6;
1857 adev->mode_info.num_dig = 6;
1858 adev->mode_info.plane_type = dm_plane_type_default;
1859 break;
1860 case CHIP_VEGA10:
1861 case CHIP_VEGA12:
1862 case CHIP_VEGA20:
1863 adev->mode_info.num_crtc = 6;
1864 adev->mode_info.num_hpd = 6;
1865 adev->mode_info.num_dig = 6;
1866 adev->mode_info.plane_type = dm_plane_type_default;
1867 break;
1868 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1869 case CHIP_RAVEN:
1870 adev->mode_info.num_crtc = 4;
1871 adev->mode_info.num_hpd = 4;
1872 adev->mode_info.num_dig = 4;
1873 adev->mode_info.plane_type = dm_plane_type_default;
1874 break;
1875 #endif
1876 default:
1877 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1878 return -EINVAL;
1879 }
1880
1881 amdgpu_dm_set_irq_funcs(adev);
1882
1883 if (adev->mode_info.funcs == NULL)
1884 adev->mode_info.funcs = &dm_display_funcs;
1885
1886 /* Note: Do NOT change adev->audio_endpt_rreg and
1887 * adev->audio_endpt_wreg because they are initialised in
1888 * amdgpu_device_init() */
1889 #if defined(CONFIG_DEBUG_KERNEL_DC)
1890 device_create_file(
1891 adev->ddev->dev,
1892 &dev_attr_s3_debug);
1893 #endif
1894
1895 return 0;
1896 }
1897
modeset_required(struct drm_crtc_state * crtc_state,struct dc_stream_state * new_stream,struct dc_stream_state * old_stream)1898 static bool modeset_required(struct drm_crtc_state *crtc_state,
1899 struct dc_stream_state *new_stream,
1900 struct dc_stream_state *old_stream)
1901 {
1902 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1903 return false;
1904
1905 if (!crtc_state->enable)
1906 return false;
1907
1908 return crtc_state->active;
1909 }
1910
modereset_required(struct drm_crtc_state * crtc_state)1911 static bool modereset_required(struct drm_crtc_state *crtc_state)
1912 {
1913 if (!drm_atomic_crtc_needs_modeset(crtc_state))
1914 return false;
1915
1916 return !crtc_state->enable || !crtc_state->active;
1917 }
1918
amdgpu_dm_encoder_destroy(struct drm_encoder * encoder)1919 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
1920 {
1921 drm_encoder_cleanup(encoder);
1922 kfree(encoder);
1923 }
1924
1925 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
1926 .destroy = amdgpu_dm_encoder_destroy,
1927 };
1928
fill_rects_from_plane_state(const struct drm_plane_state * state,struct dc_plane_state * plane_state)1929 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1930 struct dc_plane_state *plane_state)
1931 {
1932 plane_state->src_rect.x = state->src_x >> 16;
1933 plane_state->src_rect.y = state->src_y >> 16;
1934 /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1935 plane_state->src_rect.width = state->src_w >> 16;
1936
1937 if (plane_state->src_rect.width == 0)
1938 return false;
1939
1940 plane_state->src_rect.height = state->src_h >> 16;
1941 if (plane_state->src_rect.height == 0)
1942 return false;
1943
1944 plane_state->dst_rect.x = state->crtc_x;
1945 plane_state->dst_rect.y = state->crtc_y;
1946
1947 if (state->crtc_w == 0)
1948 return false;
1949
1950 plane_state->dst_rect.width = state->crtc_w;
1951
1952 if (state->crtc_h == 0)
1953 return false;
1954
1955 plane_state->dst_rect.height = state->crtc_h;
1956
1957 plane_state->clip_rect = plane_state->dst_rect;
1958
1959 switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1960 case DRM_MODE_ROTATE_0:
1961 plane_state->rotation = ROTATION_ANGLE_0;
1962 break;
1963 case DRM_MODE_ROTATE_90:
1964 plane_state->rotation = ROTATION_ANGLE_90;
1965 break;
1966 case DRM_MODE_ROTATE_180:
1967 plane_state->rotation = ROTATION_ANGLE_180;
1968 break;
1969 case DRM_MODE_ROTATE_270:
1970 plane_state->rotation = ROTATION_ANGLE_270;
1971 break;
1972 default:
1973 plane_state->rotation = ROTATION_ANGLE_0;
1974 break;
1975 }
1976
1977 return true;
1978 }
get_fb_info(const struct amdgpu_framebuffer * amdgpu_fb,uint64_t * tiling_flags)1979 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1980 uint64_t *tiling_flags)
1981 {
1982 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
1983 int r = amdgpu_bo_reserve(rbo, false);
1984
1985 if (unlikely(r)) {
1986 // Don't show error msg. when return -ERESTARTSYS
1987 if (r != -ERESTARTSYS)
1988 DRM_ERROR("Unable to reserve buffer: %d\n", r);
1989 return r;
1990 }
1991
1992 if (tiling_flags)
1993 amdgpu_bo_get_tiling_flags(rbo, (u64 *)tiling_flags);
1994
1995 amdgpu_bo_unreserve(rbo);
1996
1997 return r;
1998 }
1999
fill_plane_attributes_from_fb(struct amdgpu_device * adev,struct dc_plane_state * plane_state,const struct amdgpu_framebuffer * amdgpu_fb)2000 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2001 struct dc_plane_state *plane_state,
2002 const struct amdgpu_framebuffer *amdgpu_fb)
2003 {
2004 uint64_t tiling_flags;
2005 unsigned int awidth;
2006 const struct drm_framebuffer *fb = &amdgpu_fb->base;
2007 int ret = 0;
2008 struct drm_format_name_buf format_name;
2009
2010 ret = get_fb_info(
2011 amdgpu_fb,
2012 &tiling_flags);
2013
2014 if (ret)
2015 return ret;
2016
2017 switch (fb->format->format) {
2018 case DRM_FORMAT_C8:
2019 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2020 break;
2021 case DRM_FORMAT_RGB565:
2022 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2023 break;
2024 case DRM_FORMAT_XRGB8888:
2025 case DRM_FORMAT_ARGB8888:
2026 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2027 break;
2028 case DRM_FORMAT_XRGB2101010:
2029 case DRM_FORMAT_ARGB2101010:
2030 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2031 break;
2032 case DRM_FORMAT_XBGR2101010:
2033 case DRM_FORMAT_ABGR2101010:
2034 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2035 break;
2036 case DRM_FORMAT_NV21:
2037 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2038 break;
2039 case DRM_FORMAT_NV12:
2040 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2041 break;
2042 default:
2043 DRM_ERROR("Unsupported screen format %s\n",
2044 drm_get_format_name(fb->format->format, &format_name));
2045 return -EINVAL;
2046 }
2047
2048 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2049 plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
2050 plane_state->plane_size.grph.surface_size.x = 0;
2051 plane_state->plane_size.grph.surface_size.y = 0;
2052 plane_state->plane_size.grph.surface_size.width = fb->width;
2053 plane_state->plane_size.grph.surface_size.height = fb->height;
2054 plane_state->plane_size.grph.surface_pitch =
2055 fb->pitches[0] / fb->format->cpp[0];
2056 /* TODO: unhardcode */
2057 plane_state->color_space = COLOR_SPACE_SRGB;
2058
2059 } else {
2060 awidth = ALIGN(fb->width, 64);
2061 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2062 plane_state->plane_size.video.luma_size.x = 0;
2063 plane_state->plane_size.video.luma_size.y = 0;
2064 plane_state->plane_size.video.luma_size.width = awidth;
2065 plane_state->plane_size.video.luma_size.height = fb->height;
2066 /* TODO: unhardcode */
2067 plane_state->plane_size.video.luma_pitch = awidth;
2068
2069 plane_state->plane_size.video.chroma_size.x = 0;
2070 plane_state->plane_size.video.chroma_size.y = 0;
2071 plane_state->plane_size.video.chroma_size.width = awidth;
2072 plane_state->plane_size.video.chroma_size.height = fb->height;
2073 plane_state->plane_size.video.chroma_pitch = awidth / 2;
2074
2075 /* TODO: unhardcode */
2076 plane_state->color_space = COLOR_SPACE_YCBCR709;
2077 }
2078
2079 memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
2080
2081 /* Fill GFX8 params */
2082 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2083 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2084
2085 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2086 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2087 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2088 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2089 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2090
2091 /* XXX fix me for VI */
2092 plane_state->tiling_info.gfx8.num_banks = num_banks;
2093 plane_state->tiling_info.gfx8.array_mode =
2094 DC_ARRAY_2D_TILED_THIN1;
2095 plane_state->tiling_info.gfx8.tile_split = tile_split;
2096 plane_state->tiling_info.gfx8.bank_width = bankw;
2097 plane_state->tiling_info.gfx8.bank_height = bankh;
2098 plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
2099 plane_state->tiling_info.gfx8.tile_mode =
2100 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2101 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2102 == DC_ARRAY_1D_TILED_THIN1) {
2103 plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2104 }
2105
2106 plane_state->tiling_info.gfx8.pipe_config =
2107 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2108
2109 if (adev->asic_type == CHIP_VEGA10 ||
2110 adev->asic_type == CHIP_VEGA12 ||
2111 adev->asic_type == CHIP_VEGA20 ||
2112 adev->asic_type == CHIP_RAVEN) {
2113 /* Fill GFX9 params */
2114 plane_state->tiling_info.gfx9.num_pipes =
2115 adev->gfx.config.gb_addr_config_fields.num_pipes;
2116 plane_state->tiling_info.gfx9.num_banks =
2117 adev->gfx.config.gb_addr_config_fields.num_banks;
2118 plane_state->tiling_info.gfx9.pipe_interleave =
2119 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2120 plane_state->tiling_info.gfx9.num_shader_engines =
2121 adev->gfx.config.gb_addr_config_fields.num_se;
2122 plane_state->tiling_info.gfx9.max_compressed_frags =
2123 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2124 plane_state->tiling_info.gfx9.num_rb_per_se =
2125 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2126 plane_state->tiling_info.gfx9.swizzle =
2127 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2128 plane_state->tiling_info.gfx9.shaderEnable = 1;
2129 }
2130
2131 plane_state->visible = true;
2132 plane_state->scaling_quality.h_taps_c = 0;
2133 plane_state->scaling_quality.v_taps_c = 0;
2134
2135 /* is this needed? is plane_state zeroed at allocation? */
2136 plane_state->scaling_quality.h_taps = 0;
2137 plane_state->scaling_quality.v_taps = 0;
2138 plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
2139
2140 return ret;
2141
2142 }
2143
fill_plane_attributes(struct amdgpu_device * adev,struct dc_plane_state * dc_plane_state,struct drm_plane_state * plane_state,struct drm_crtc_state * crtc_state)2144 static int fill_plane_attributes(struct amdgpu_device *adev,
2145 struct dc_plane_state *dc_plane_state,
2146 struct drm_plane_state *plane_state,
2147 struct drm_crtc_state *crtc_state)
2148 {
2149 const struct amdgpu_framebuffer *amdgpu_fb =
2150 to_amdgpu_framebuffer(plane_state->fb);
2151 const struct drm_crtc *crtc = plane_state->crtc;
2152 int ret = 0;
2153
2154 if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2155 return -EINVAL;
2156
2157 ret = fill_plane_attributes_from_fb(
2158 crtc->dev->dev_private,
2159 dc_plane_state,
2160 amdgpu_fb);
2161
2162 if (ret)
2163 return ret;
2164
2165 /*
2166 * Always set input transfer function, since plane state is refreshed
2167 * every time.
2168 */
2169 ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2170 if (ret) {
2171 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2172 dc_plane_state->in_transfer_func = NULL;
2173 }
2174
2175 return ret;
2176 }
2177
2178 /*****************************************************************************/
2179
update_stream_scaling_settings(const struct drm_display_mode * mode,const struct dm_connector_state * dm_state,struct dc_stream_state * stream)2180 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2181 const struct dm_connector_state *dm_state,
2182 struct dc_stream_state *stream)
2183 {
2184 enum amdgpu_rmx_type rmx_type;
2185
2186 struct rect src = { 0 }; /* viewport in composition space*/
2187 struct rect dst = { 0 }; /* stream addressable area */
2188
2189 /* no mode. nothing to be done */
2190 if (!mode)
2191 return;
2192
2193 /* Full screen scaling by default */
2194 src.width = mode->hdisplay;
2195 src.height = mode->vdisplay;
2196 dst.width = stream->timing.h_addressable;
2197 dst.height = stream->timing.v_addressable;
2198
2199 if (dm_state) {
2200 rmx_type = dm_state->scaling;
2201 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2202 if (src.width * dst.height <
2203 src.height * dst.width) {
2204 /* height needs less upscaling/more downscaling */
2205 dst.width = src.width *
2206 dst.height / src.height;
2207 } else {
2208 /* width needs less upscaling/more downscaling */
2209 dst.height = src.height *
2210 dst.width / src.width;
2211 }
2212 } else if (rmx_type == RMX_CENTER) {
2213 dst = src;
2214 }
2215
2216 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2217 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2218
2219 if (dm_state->underscan_enable) {
2220 dst.x += dm_state->underscan_hborder / 2;
2221 dst.y += dm_state->underscan_vborder / 2;
2222 dst.width -= dm_state->underscan_hborder;
2223 dst.height -= dm_state->underscan_vborder;
2224 }
2225 }
2226
2227 stream->src = src;
2228 stream->dst = dst;
2229
2230 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
2231 dst.x, dst.y, dst.width, dst.height);
2232
2233 }
2234
2235 static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector * connector)2236 convert_color_depth_from_display_info(const struct drm_connector *connector)
2237 {
2238 struct dm_connector_state *dm_conn_state =
2239 to_dm_connector_state(connector->state);
2240 uint32_t bpc = connector->display_info.bpc;
2241
2242 /* TODO: Remove this when there's support for max_bpc in drm */
2243 if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2244 /* Round down to nearest even number. */
2245 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2246
2247 switch (bpc) {
2248 case 0:
2249 /* Temporary Work around, DRM don't parse color depth for
2250 * EDID revision before 1.4
2251 * TODO: Fix edid parsing
2252 */
2253 return COLOR_DEPTH_888;
2254 case 6:
2255 return COLOR_DEPTH_666;
2256 case 8:
2257 return COLOR_DEPTH_888;
2258 case 10:
2259 return COLOR_DEPTH_101010;
2260 case 12:
2261 return COLOR_DEPTH_121212;
2262 case 14:
2263 return COLOR_DEPTH_141414;
2264 case 16:
2265 return COLOR_DEPTH_161616;
2266 default:
2267 return COLOR_DEPTH_UNDEFINED;
2268 }
2269 }
2270
2271 static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode * mode_in)2272 get_aspect_ratio(const struct drm_display_mode *mode_in)
2273 {
2274 /* 1-1 mapping, since both enums follow the HDMI spec. */
2275 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
2276 }
2277
2278 static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing * dc_crtc_timing)2279 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2280 {
2281 enum dc_color_space color_space = COLOR_SPACE_SRGB;
2282
2283 switch (dc_crtc_timing->pixel_encoding) {
2284 case PIXEL_ENCODING_YCBCR422:
2285 case PIXEL_ENCODING_YCBCR444:
2286 case PIXEL_ENCODING_YCBCR420:
2287 {
2288 /*
2289 * 27030khz is the separation point between HDTV and SDTV
2290 * according to HDMI spec, we use YCbCr709 and YCbCr601
2291 * respectively
2292 */
2293 if (dc_crtc_timing->pix_clk_khz > 27030) {
2294 if (dc_crtc_timing->flags.Y_ONLY)
2295 color_space =
2296 COLOR_SPACE_YCBCR709_LIMITED;
2297 else
2298 color_space = COLOR_SPACE_YCBCR709;
2299 } else {
2300 if (dc_crtc_timing->flags.Y_ONLY)
2301 color_space =
2302 COLOR_SPACE_YCBCR601_LIMITED;
2303 else
2304 color_space = COLOR_SPACE_YCBCR601;
2305 }
2306
2307 }
2308 break;
2309 case PIXEL_ENCODING_RGB:
2310 color_space = COLOR_SPACE_SRGB;
2311 break;
2312
2313 default:
2314 WARN_ON(1);
2315 break;
2316 }
2317
2318 return color_space;
2319 }
2320
reduce_mode_colour_depth(struct dc_crtc_timing * timing_out)2321 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2322 {
2323 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2324 return;
2325
2326 timing_out->display_color_depth--;
2327 }
2328
adjust_colour_depth_from_display_info(struct dc_crtc_timing * timing_out,const struct drm_display_info * info)2329 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2330 const struct drm_display_info *info)
2331 {
2332 int normalized_clk;
2333 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2334 return;
2335 do {
2336 normalized_clk = timing_out->pix_clk_khz;
2337 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2338 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2339 normalized_clk /= 2;
2340 /* Adjusting pix clock following on HDMI spec based on colour depth */
2341 switch (timing_out->display_color_depth) {
2342 case COLOR_DEPTH_101010:
2343 normalized_clk = (normalized_clk * 30) / 24;
2344 break;
2345 case COLOR_DEPTH_121212:
2346 normalized_clk = (normalized_clk * 36) / 24;
2347 break;
2348 case COLOR_DEPTH_161616:
2349 normalized_clk = (normalized_clk * 48) / 24;
2350 break;
2351 default:
2352 return;
2353 }
2354 if (normalized_clk <= info->max_tmds_clock)
2355 return;
2356 reduce_mode_colour_depth(timing_out);
2357
2358 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2359
2360 }
2361 /*****************************************************************************/
2362
2363 static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state * stream,const struct drm_display_mode * mode_in,const struct drm_connector * connector)2364 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2365 const struct drm_display_mode *mode_in,
2366 const struct drm_connector *connector)
2367 {
2368 struct dc_crtc_timing *timing_out = &stream->timing;
2369 const struct drm_display_info *info = &connector->display_info;
2370
2371 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2372
2373 timing_out->h_border_left = 0;
2374 timing_out->h_border_right = 0;
2375 timing_out->v_border_top = 0;
2376 timing_out->v_border_bottom = 0;
2377 /* TODO: un-hardcode */
2378 if (drm_mode_is_420_only(info, mode_in)
2379 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2380 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2381 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2382 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2383 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2384 else
2385 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2386
2387 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2388 timing_out->display_color_depth = convert_color_depth_from_display_info(
2389 connector);
2390 timing_out->scan_type = SCANNING_TYPE_NODATA;
2391 timing_out->hdmi_vic = 0;
2392 timing_out->vic = drm_match_cea_mode(mode_in);
2393
2394 timing_out->h_addressable = mode_in->crtc_hdisplay;
2395 timing_out->h_total = mode_in->crtc_htotal;
2396 timing_out->h_sync_width =
2397 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2398 timing_out->h_front_porch =
2399 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2400 timing_out->v_total = mode_in->crtc_vtotal;
2401 timing_out->v_addressable = mode_in->crtc_vdisplay;
2402 timing_out->v_front_porch =
2403 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2404 timing_out->v_sync_width =
2405 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2406 timing_out->pix_clk_khz = mode_in->crtc_clock;
2407 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2408 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2409 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2410 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2411 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2412
2413 stream->output_color_space = get_output_color_space(timing_out);
2414
2415 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2416 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2417 if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2418 adjust_colour_depth_from_display_info(timing_out, info);
2419 }
2420
fill_audio_info(struct audio_info * audio_info,const struct drm_connector * drm_connector,const struct dc_sink * dc_sink)2421 static void fill_audio_info(struct audio_info *audio_info,
2422 const struct drm_connector *drm_connector,
2423 const struct dc_sink *dc_sink)
2424 {
2425 int i = 0;
2426 int cea_revision = 0;
2427 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2428
2429 audio_info->manufacture_id = edid_caps->manufacturer_id;
2430 audio_info->product_id = edid_caps->product_id;
2431
2432 cea_revision = drm_connector->display_info.cea_rev;
2433
2434 strncpy(audio_info->display_name,
2435 edid_caps->display_name,
2436 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2437
2438 if (cea_revision >= 3) {
2439 audio_info->mode_count = edid_caps->audio_mode_count;
2440
2441 for (i = 0; i < audio_info->mode_count; ++i) {
2442 audio_info->modes[i].format_code =
2443 (enum audio_format_code)
2444 (edid_caps->audio_modes[i].format_code);
2445 audio_info->modes[i].channel_count =
2446 edid_caps->audio_modes[i].channel_count;
2447 audio_info->modes[i].sample_rates.all =
2448 edid_caps->audio_modes[i].sample_rate;
2449 audio_info->modes[i].sample_size =
2450 edid_caps->audio_modes[i].sample_size;
2451 }
2452 }
2453
2454 audio_info->flags.all = edid_caps->speaker_flags;
2455
2456 /* TODO: We only check for the progressive mode, check for interlace mode too */
2457 if (drm_connector->latency_present[0]) {
2458 audio_info->video_latency = drm_connector->video_latency[0];
2459 audio_info->audio_latency = drm_connector->audio_latency[0];
2460 }
2461
2462 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2463
2464 }
2465
2466 static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode * src_mode,struct drm_display_mode * dst_mode)2467 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
2468 struct drm_display_mode *dst_mode)
2469 {
2470 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
2471 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
2472 dst_mode->crtc_clock = src_mode->crtc_clock;
2473 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
2474 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
2475 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
2476 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
2477 dst_mode->crtc_htotal = src_mode->crtc_htotal;
2478 dst_mode->crtc_hskew = src_mode->crtc_hskew;
2479 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
2480 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
2481 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
2482 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
2483 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
2484 }
2485
2486 static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode * drm_mode,const struct drm_display_mode * native_mode,bool scale_enabled)2487 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2488 const struct drm_display_mode *native_mode,
2489 bool scale_enabled)
2490 {
2491 if (scale_enabled) {
2492 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2493 } else if (native_mode->clock == drm_mode->clock &&
2494 native_mode->htotal == drm_mode->htotal &&
2495 native_mode->vtotal == drm_mode->vtotal) {
2496 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2497 } else {
2498 /* no scaling nor amdgpu inserted, no need to patch */
2499 }
2500 }
2501
2502 static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector * aconnector)2503 create_fake_sink(struct amdgpu_dm_connector *aconnector)
2504 {
2505 struct dc_sink_init_data sink_init_data = { 0 };
2506 struct dc_sink *sink = NULL;
2507 sink_init_data.link = aconnector->dc_link;
2508 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2509
2510 sink = dc_sink_create(&sink_init_data);
2511 if (!sink) {
2512 DRM_ERROR("Failed to create sink!\n");
2513 return NULL;
2514 }
2515 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2516
2517 return sink;
2518 }
2519
set_multisync_trigger_params(struct dc_stream_state * stream)2520 static void set_multisync_trigger_params(
2521 struct dc_stream_state *stream)
2522 {
2523 if (stream->triggered_crtc_reset.enabled) {
2524 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
2525 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
2526 }
2527 }
2528
set_master_stream(struct dc_stream_state * stream_set[],int stream_count)2529 static void set_master_stream(struct dc_stream_state *stream_set[],
2530 int stream_count)
2531 {
2532 int j, highest_rfr = 0, master_stream = 0;
2533
2534 for (j = 0; j < stream_count; j++) {
2535 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
2536 int refresh_rate = 0;
2537
2538 refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
2539 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
2540 if (refresh_rate > highest_rfr) {
2541 highest_rfr = refresh_rate;
2542 master_stream = j;
2543 }
2544 }
2545 }
2546 for (j = 0; j < stream_count; j++) {
2547 if (stream_set[j])
2548 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
2549 }
2550 }
2551
dm_enable_per_frame_crtc_master_sync(struct dc_state * context)2552 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
2553 {
2554 int i = 0;
2555
2556 if (context->stream_count < 2)
2557 return;
2558 for (i = 0; i < context->stream_count ; i++) {
2559 if (!context->streams[i])
2560 continue;
2561 /* TODO: add a function to read AMD VSDB bits and will set
2562 * crtc_sync_master.multi_sync_enabled flag
2563 * For now its set to false
2564 */
2565 set_multisync_trigger_params(context->streams[i]);
2566 }
2567 set_master_stream(context->streams, context->stream_count);
2568 }
2569
2570 static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector * aconnector,const struct drm_display_mode * drm_mode,const struct dm_connector_state * dm_state)2571 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2572 const struct drm_display_mode *drm_mode,
2573 const struct dm_connector_state *dm_state)
2574 {
2575 struct drm_display_mode *preferred_mode = NULL;
2576 struct drm_connector *drm_connector;
2577 struct dc_stream_state *stream = NULL;
2578 struct drm_display_mode mode = *drm_mode;
2579 bool native_mode_found = false;
2580 struct dc_sink *sink = NULL;
2581 if (aconnector == NULL) {
2582 DRM_ERROR("aconnector is NULL!\n");
2583 return stream;
2584 }
2585
2586 drm_connector = &aconnector->base;
2587
2588 if (!aconnector->dc_sink) {
2589 /*
2590 * Create dc_sink when necessary to MST
2591 * Don't apply fake_sink to MST
2592 */
2593 if (aconnector->mst_port) {
2594 dm_dp_mst_dc_sink_create(drm_connector);
2595 return stream;
2596 }
2597
2598 sink = create_fake_sink(aconnector);
2599 if (!sink)
2600 return stream;
2601 } else {
2602 sink = aconnector->dc_sink;
2603 }
2604
2605 stream = dc_create_stream_for_sink(sink);
2606
2607 if (stream == NULL) {
2608 DRM_ERROR("Failed to create stream for sink!\n");
2609 goto finish;
2610 }
2611
2612 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
2613 /* Search for preferred mode */
2614 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
2615 native_mode_found = true;
2616 break;
2617 }
2618 }
2619 if (!native_mode_found)
2620 preferred_mode = list_first_entry_or_null(
2621 &aconnector->base.modes,
2622 struct drm_display_mode,
2623 head);
2624
2625 if (preferred_mode == NULL) {
2626 /* This may not be an error, the use case is when we we have no
2627 * usermode calls to reset and set mode upon hotplug. In this
2628 * case, we call set mode ourselves to restore the previous mode
2629 * and the modelist may not be filled in in time.
2630 */
2631 DRM_DEBUG_DRIVER("No preferred mode found\n");
2632 } else {
2633 decide_crtc_timing_for_drm_display_mode(
2634 &mode, preferred_mode,
2635 dm_state ? (dm_state->scaling != RMX_OFF) : false);
2636 }
2637
2638 if (!dm_state)
2639 drm_mode_set_crtcinfo(&mode, 0);
2640
2641 fill_stream_properties_from_drm_display_mode(stream,
2642 &mode, &aconnector->base);
2643 update_stream_scaling_settings(&mode, dm_state, stream);
2644
2645 fill_audio_info(
2646 &stream->audio_info,
2647 drm_connector,
2648 sink);
2649
2650 update_stream_signal(stream);
2651
2652 if (dm_state && dm_state->freesync_capable)
2653 stream->ignore_msa_timing_param = true;
2654 finish:
2655 if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2656 dc_sink_release(sink);
2657
2658 return stream;
2659 }
2660
amdgpu_dm_crtc_destroy(struct drm_crtc * crtc)2661 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
2662 {
2663 drm_crtc_cleanup(crtc);
2664 kfree(crtc);
2665 }
2666
dm_crtc_destroy_state(struct drm_crtc * crtc,struct drm_crtc_state * state)2667 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2668 struct drm_crtc_state *state)
2669 {
2670 struct dm_crtc_state *cur = to_dm_crtc_state(state);
2671
2672 /* TODO Destroy dc_stream objects are stream object is flattened */
2673 if (cur->stream)
2674 dc_stream_release(cur->stream);
2675
2676
2677 __drm_atomic_helper_crtc_destroy_state(state);
2678
2679
2680 kfree(state);
2681 }
2682
dm_crtc_reset_state(struct drm_crtc * crtc)2683 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2684 {
2685 struct dm_crtc_state *state;
2686
2687 if (crtc->state)
2688 dm_crtc_destroy_state(crtc, crtc->state);
2689
2690 state = kzalloc(sizeof(*state), GFP_KERNEL);
2691 if (WARN_ON(!state))
2692 return;
2693
2694 crtc->state = &state->base;
2695 crtc->state->crtc = crtc;
2696
2697 }
2698
2699 static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc * crtc)2700 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2701 {
2702 struct dm_crtc_state *state, *cur;
2703
2704 cur = to_dm_crtc_state(crtc->state);
2705
2706 if (WARN_ON(!crtc->state))
2707 return NULL;
2708
2709 state = kzalloc(sizeof(*state), GFP_KERNEL);
2710 if (!state)
2711 return NULL;
2712
2713 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2714
2715 if (cur->stream) {
2716 state->stream = cur->stream;
2717 dc_stream_retain(state->stream);
2718 }
2719
2720 /* TODO Duplicate dc_stream after objects are stream object is flattened */
2721
2722 return &state->base;
2723 }
2724
2725
dm_set_vblank(struct drm_crtc * crtc,bool enable)2726 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
2727 {
2728 enum dc_irq_source irq_source;
2729 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2730 struct amdgpu_device *adev = crtc->dev->dev_private;
2731
2732 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2733 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2734 }
2735
dm_enable_vblank(struct drm_crtc * crtc)2736 static int dm_enable_vblank(struct drm_crtc *crtc)
2737 {
2738 return dm_set_vblank(crtc, true);
2739 }
2740
dm_disable_vblank(struct drm_crtc * crtc)2741 static void dm_disable_vblank(struct drm_crtc *crtc)
2742 {
2743 dm_set_vblank(crtc, false);
2744 }
2745
2746 /* Implemented only the options currently availible for the driver */
2747 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
2748 .reset = dm_crtc_reset_state,
2749 .destroy = amdgpu_dm_crtc_destroy,
2750 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2751 .set_config = drm_atomic_helper_set_config,
2752 .page_flip = drm_atomic_helper_page_flip,
2753 .atomic_duplicate_state = dm_crtc_duplicate_state,
2754 .atomic_destroy_state = dm_crtc_destroy_state,
2755 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
2756 .enable_vblank = dm_enable_vblank,
2757 .disable_vblank = dm_disable_vblank,
2758 };
2759
2760 static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector * connector,bool force)2761 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2762 {
2763 bool connected;
2764 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2765
2766 /* Notes:
2767 * 1. This interface is NOT called in context of HPD irq.
2768 * 2. This interface *is called* in context of user-mode ioctl. Which
2769 * makes it a bad place for *any* MST-related activit. */
2770
2771 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2772 !aconnector->fake_enable)
2773 connected = (aconnector->dc_sink != NULL);
2774 else
2775 connected = (aconnector->base.force == DRM_FORCE_ON);
2776
2777 return (connected ? connector_status_connected :
2778 connector_status_disconnected);
2779 }
2780
amdgpu_dm_connector_atomic_set_property(struct drm_connector * connector,struct drm_connector_state * connector_state,struct drm_property * property,uint64_t val)2781 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2782 struct drm_connector_state *connector_state,
2783 struct drm_property *property,
2784 uint64_t val)
2785 {
2786 struct drm_device *dev = connector->dev;
2787 struct amdgpu_device *adev = dev->dev_private;
2788 struct dm_connector_state *dm_old_state =
2789 to_dm_connector_state(connector->state);
2790 struct dm_connector_state *dm_new_state =
2791 to_dm_connector_state(connector_state);
2792
2793 int ret = -EINVAL;
2794
2795 if (property == dev->mode_config.scaling_mode_property) {
2796 enum amdgpu_rmx_type rmx_type;
2797
2798 switch (val) {
2799 case DRM_MODE_SCALE_CENTER:
2800 rmx_type = RMX_CENTER;
2801 break;
2802 case DRM_MODE_SCALE_ASPECT:
2803 rmx_type = RMX_ASPECT;
2804 break;
2805 case DRM_MODE_SCALE_FULLSCREEN:
2806 rmx_type = RMX_FULL;
2807 break;
2808 case DRM_MODE_SCALE_NONE:
2809 default:
2810 rmx_type = RMX_OFF;
2811 break;
2812 }
2813
2814 if (dm_old_state->scaling == rmx_type)
2815 return 0;
2816
2817 dm_new_state->scaling = rmx_type;
2818 ret = 0;
2819 } else if (property == adev->mode_info.underscan_hborder_property) {
2820 dm_new_state->underscan_hborder = val;
2821 ret = 0;
2822 } else if (property == adev->mode_info.underscan_vborder_property) {
2823 dm_new_state->underscan_vborder = val;
2824 ret = 0;
2825 } else if (property == adev->mode_info.underscan_property) {
2826 dm_new_state->underscan_enable = val;
2827 ret = 0;
2828 } else if (property == adev->mode_info.max_bpc_property) {
2829 dm_new_state->max_bpc = val;
2830 ret = 0;
2831 }
2832
2833 return ret;
2834 }
2835
amdgpu_dm_connector_atomic_get_property(struct drm_connector * connector,const struct drm_connector_state * state,struct drm_property * property,uint64_t * val)2836 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2837 const struct drm_connector_state *state,
2838 struct drm_property *property,
2839 uint64_t *val)
2840 {
2841 struct drm_device *dev = connector->dev;
2842 struct amdgpu_device *adev = dev->dev_private;
2843 struct dm_connector_state *dm_state =
2844 to_dm_connector_state(state);
2845 int ret = -EINVAL;
2846
2847 if (property == dev->mode_config.scaling_mode_property) {
2848 switch (dm_state->scaling) {
2849 case RMX_CENTER:
2850 *val = DRM_MODE_SCALE_CENTER;
2851 break;
2852 case RMX_ASPECT:
2853 *val = DRM_MODE_SCALE_ASPECT;
2854 break;
2855 case RMX_FULL:
2856 *val = DRM_MODE_SCALE_FULLSCREEN;
2857 break;
2858 case RMX_OFF:
2859 default:
2860 *val = DRM_MODE_SCALE_NONE;
2861 break;
2862 }
2863 ret = 0;
2864 } else if (property == adev->mode_info.underscan_hborder_property) {
2865 *val = dm_state->underscan_hborder;
2866 ret = 0;
2867 } else if (property == adev->mode_info.underscan_vborder_property) {
2868 *val = dm_state->underscan_vborder;
2869 ret = 0;
2870 } else if (property == adev->mode_info.underscan_property) {
2871 *val = dm_state->underscan_enable;
2872 ret = 0;
2873 } else if (property == adev->mode_info.max_bpc_property) {
2874 *val = dm_state->max_bpc;
2875 ret = 0;
2876 }
2877 return ret;
2878 }
2879
amdgpu_dm_connector_destroy(struct drm_connector * connector)2880 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2881 {
2882 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2883 const struct dc_link *link = aconnector->dc_link;
2884 struct amdgpu_device *adev = connector->dev->dev_private;
2885 struct amdgpu_display_manager *dm = &adev->dm;
2886
2887 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2888 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2889 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2890 link->type != dc_connection_none &&
2891 dm->backlight_dev) {
2892 #if 0
2893 backlight_device_unregister(dm->backlight_dev);
2894 #endif
2895 dm->backlight_dev = NULL;
2896 }
2897 #endif
2898 drm_connector_unregister(connector);
2899 drm_connector_cleanup(connector);
2900 kfree(connector);
2901 }
2902
amdgpu_dm_connector_funcs_reset(struct drm_connector * connector)2903 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2904 {
2905 struct dm_connector_state *state =
2906 to_dm_connector_state(connector->state);
2907
2908 if (connector->state)
2909 __drm_atomic_helper_connector_destroy_state(connector->state);
2910
2911 kfree(state);
2912
2913 state = kzalloc(sizeof(*state), GFP_KERNEL);
2914
2915 if (state) {
2916 state->scaling = RMX_OFF;
2917 state->underscan_enable = false;
2918 state->underscan_hborder = 0;
2919 state->underscan_vborder = 0;
2920 state->max_bpc = 8;
2921
2922 __drm_atomic_helper_connector_reset(connector, &state->base);
2923 }
2924 }
2925
2926 struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector * connector)2927 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2928 {
2929 struct dm_connector_state *state =
2930 to_dm_connector_state(connector->state);
2931
2932 struct dm_connector_state *new_state =
2933 kmemdup(state, sizeof(*state), GFP_KERNEL);
2934
2935 if (new_state) {
2936 __drm_atomic_helper_connector_duplicate_state(connector,
2937 &new_state->base);
2938 new_state->max_bpc = state->max_bpc;
2939 return &new_state->base;
2940 }
2941
2942 return NULL;
2943 }
2944
2945 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
2946 .reset = amdgpu_dm_connector_funcs_reset,
2947 .detect = amdgpu_dm_connector_detect,
2948 .fill_modes = drm_helper_probe_single_connector_modes,
2949 .destroy = amdgpu_dm_connector_destroy,
2950 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
2951 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2952 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
2953 .atomic_get_property = amdgpu_dm_connector_atomic_get_property
2954 };
2955
best_encoder(struct drm_connector * connector)2956 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2957 {
2958 int enc_id = connector->encoder_ids[0];
2959 struct drm_mode_object *obj;
2960 struct drm_encoder *encoder;
2961
2962 DRM_DEBUG_DRIVER("Finding the best encoder\n");
2963
2964 /* pick the encoder ids */
2965 if (enc_id) {
2966 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2967 if (!obj) {
2968 DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2969 return NULL;
2970 }
2971 encoder = obj_to_encoder(obj);
2972 return encoder;
2973 }
2974 DRM_ERROR("No encoder id\n");
2975 return NULL;
2976 }
2977
get_modes(struct drm_connector * connector)2978 static int get_modes(struct drm_connector *connector)
2979 {
2980 return amdgpu_dm_connector_get_modes(connector);
2981 }
2982
create_eml_sink(struct amdgpu_dm_connector * aconnector)2983 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2984 {
2985 struct dc_sink_init_data init_params = {
2986 .link = aconnector->dc_link,
2987 .sink_signal = SIGNAL_TYPE_VIRTUAL
2988 };
2989 struct edid *edid;
2990
2991 if (!aconnector->base.edid_blob_ptr) {
2992 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2993 aconnector->base.name);
2994
2995 aconnector->base.force = DRM_FORCE_OFF;
2996 aconnector->base.override_edid = false;
2997 return;
2998 }
2999
3000 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
3001
3002 aconnector->edid = edid;
3003
3004 aconnector->dc_em_sink = dc_link_add_remote_sink(
3005 aconnector->dc_link,
3006 (uint8_t *)edid,
3007 (edid->extensions + 1) * EDID_LENGTH,
3008 &init_params);
3009
3010 if (aconnector->base.force == DRM_FORCE_ON)
3011 aconnector->dc_sink = aconnector->dc_link->local_sink ?
3012 aconnector->dc_link->local_sink :
3013 aconnector->dc_em_sink;
3014 }
3015
handle_edid_mgmt(struct amdgpu_dm_connector * aconnector)3016 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3017 {
3018 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3019
3020 /* In case of headless boot with force on for DP managed connector
3021 * Those settings have to be != 0 to get initial modeset
3022 */
3023 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3024 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3025 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3026 }
3027
3028
3029 aconnector->base.override_edid = true;
3030 create_eml_sink(aconnector);
3031 }
3032
amdgpu_dm_connector_mode_valid(struct drm_connector * connector,struct drm_display_mode * mode)3033 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3034 struct drm_display_mode *mode)
3035 {
3036 int result = MODE_ERROR;
3037 struct dc_sink *dc_sink;
3038 struct amdgpu_device *adev = connector->dev->dev_private;
3039 /* TODO: Unhardcode stream count */
3040 struct dc_stream_state *stream;
3041 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3042 enum dc_status dc_result = DC_OK;
3043
3044 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3045 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3046 return result;
3047
3048 /* Only run this the first time mode_valid is called to initilialize
3049 * EDID mgmt
3050 */
3051 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3052 !aconnector->dc_em_sink)
3053 handle_edid_mgmt(aconnector);
3054
3055 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3056
3057 if (dc_sink == NULL) {
3058 DRM_ERROR("dc_sink is NULL!\n");
3059 goto fail;
3060 }
3061
3062 stream = create_stream_for_sink(aconnector, mode, NULL);
3063 if (stream == NULL) {
3064 DRM_ERROR("Failed to create stream for sink!\n");
3065 goto fail;
3066 }
3067
3068 dc_result = dc_validate_stream(adev->dm.dc, stream);
3069
3070 if (dc_result == DC_OK)
3071 result = MODE_OK;
3072 else
3073 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3074 mode->vdisplay,
3075 mode->hdisplay,
3076 mode->clock,
3077 dc_result);
3078
3079 dc_stream_release(stream);
3080
3081 fail:
3082 /* TODO: error handling*/
3083 return result;
3084 }
3085
3086 static const struct drm_connector_helper_funcs
3087 amdgpu_dm_connector_helper_funcs = {
3088 /*
3089 * If hotplug a second bigger display in FB Con mode, bigger resolution
3090 * modes will be filtered by drm_mode_validate_size(), and those modes
3091 * is missing after user start lightdm. So we need to renew modes list.
3092 * in get_modes call back, not just return the modes count
3093 */
3094 .get_modes = get_modes,
3095 .mode_valid = amdgpu_dm_connector_mode_valid,
3096 .best_encoder = best_encoder
3097 };
3098
dm_crtc_helper_disable(struct drm_crtc * crtc)3099 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
3100 {
3101 }
3102
dm_crtc_helper_atomic_check(struct drm_crtc * crtc,struct drm_crtc_state * state)3103 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3104 struct drm_crtc_state *state)
3105 {
3106 struct amdgpu_device *adev = crtc->dev->dev_private;
3107 struct dc *dc = adev->dm.dc;
3108 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3109 int ret = -EINVAL;
3110
3111 if (unlikely(!dm_crtc_state->stream &&
3112 modeset_required(state, NULL, dm_crtc_state->stream))) {
3113 WARN_ON(1);
3114 return ret;
3115 }
3116
3117 /* In some use cases, like reset, no stream is attached */
3118 if (!dm_crtc_state->stream)
3119 return 0;
3120
3121 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
3122 return 0;
3123
3124 return ret;
3125 }
3126
dm_crtc_helper_mode_fixup(struct drm_crtc * crtc,const struct drm_display_mode * mode,struct drm_display_mode * adjusted_mode)3127 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
3128 const struct drm_display_mode *mode,
3129 struct drm_display_mode *adjusted_mode)
3130 {
3131 return true;
3132 }
3133
3134 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
3135 .disable = dm_crtc_helper_disable,
3136 .atomic_check = dm_crtc_helper_atomic_check,
3137 .mode_fixup = dm_crtc_helper_mode_fixup
3138 };
3139
dm_encoder_helper_disable(struct drm_encoder * encoder)3140 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
3141 {
3142
3143 }
3144
dm_encoder_helper_atomic_check(struct drm_encoder * encoder,struct drm_crtc_state * crtc_state,struct drm_connector_state * conn_state)3145 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
3146 struct drm_crtc_state *crtc_state,
3147 struct drm_connector_state *conn_state)
3148 {
3149 return 0;
3150 }
3151
3152 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
3153 .disable = dm_encoder_helper_disable,
3154 .atomic_check = dm_encoder_helper_atomic_check
3155 };
3156
dm_drm_plane_reset(struct drm_plane * plane)3157 static void dm_drm_plane_reset(struct drm_plane *plane)
3158 {
3159 struct dm_plane_state *amdgpu_state = NULL;
3160
3161 if (plane->state)
3162 plane->funcs->atomic_destroy_state(plane, plane->state);
3163
3164 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
3165 WARN_ON(amdgpu_state == NULL);
3166
3167 if (amdgpu_state) {
3168 plane->state = &amdgpu_state->base;
3169 plane->state->plane = plane;
3170 plane->state->rotation = DRM_MODE_ROTATE_0;
3171 }
3172 }
3173
3174 static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane * plane)3175 dm_drm_plane_duplicate_state(struct drm_plane *plane)
3176 {
3177 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3178
3179 old_dm_plane_state = to_dm_plane_state(plane->state);
3180 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3181 if (!dm_plane_state)
3182 return NULL;
3183
3184 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3185
3186 if (old_dm_plane_state->dc_state) {
3187 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3188 dc_plane_state_retain(dm_plane_state->dc_state);
3189 }
3190
3191 return &dm_plane_state->base;
3192 }
3193
3194 static
dm_drm_plane_destroy_state(struct drm_plane * plane,struct drm_plane_state * state)3195 void dm_drm_plane_destroy_state(struct drm_plane *plane,
3196 struct drm_plane_state *state)
3197 {
3198 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3199
3200 if (dm_plane_state->dc_state)
3201 dc_plane_state_release(dm_plane_state->dc_state);
3202
3203 drm_atomic_helper_plane_destroy_state(plane, state);
3204 }
3205
3206 static const struct drm_plane_funcs dm_plane_funcs = {
3207 .update_plane = drm_atomic_helper_update_plane,
3208 .disable_plane = drm_atomic_helper_disable_plane,
3209 .destroy = drm_primary_helper_destroy,
3210 .reset = dm_drm_plane_reset,
3211 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
3212 .atomic_destroy_state = dm_drm_plane_destroy_state,
3213 };
3214
dm_plane_helper_prepare_fb(struct drm_plane * plane,struct drm_plane_state * new_state)3215 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3216 struct drm_plane_state *new_state)
3217 {
3218 struct amdgpu_framebuffer *afb;
3219 struct drm_gem_object *obj;
3220 struct amdgpu_device *adev;
3221 struct amdgpu_bo *rbo;
3222 uint64_t chroma_addr = 0;
3223 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3224 unsigned int awidth;
3225 uint32_t domain;
3226 int r;
3227
3228 dm_plane_state_old = to_dm_plane_state(plane->state);
3229 dm_plane_state_new = to_dm_plane_state(new_state);
3230
3231 if (!new_state->fb) {
3232 DRM_DEBUG_DRIVER("No FB bound\n");
3233 return 0;
3234 }
3235
3236 afb = to_amdgpu_framebuffer(new_state->fb);
3237 obj = new_state->fb->obj[0];
3238 rbo = gem_to_amdgpu_bo(obj);
3239 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3240 r = amdgpu_bo_reserve(rbo, false);
3241 if (unlikely(r != 0))
3242 return r;
3243
3244 if (plane->type != DRM_PLANE_TYPE_CURSOR)
3245 domain = amdgpu_display_supported_domains(adev);
3246 else
3247 domain = AMDGPU_GEM_DOMAIN_VRAM;
3248
3249 r = amdgpu_bo_pin(rbo, domain);
3250 if (unlikely(r != 0)) {
3251 if (r != -ERESTARTSYS)
3252 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3253 amdgpu_bo_unreserve(rbo);
3254 return r;
3255 }
3256
3257 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
3258 if (unlikely(r != 0)) {
3259 amdgpu_bo_unpin(rbo);
3260 amdgpu_bo_unreserve(rbo);
3261 DRM_ERROR("%p bind failed\n", rbo);
3262 return r;
3263 }
3264 amdgpu_bo_unreserve(rbo);
3265
3266 afb->address = amdgpu_bo_gpu_offset(rbo);
3267
3268 amdgpu_bo_ref(rbo);
3269
3270 if (dm_plane_state_new->dc_state &&
3271 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3272 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
3273
3274 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3275 plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
3276 plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
3277 } else {
3278 awidth = ALIGN(new_state->fb->width, 64);
3279 plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3280 plane_state->address.video_progressive.luma_addr.low_part
3281 = lower_32_bits(afb->address);
3282 plane_state->address.video_progressive.luma_addr.high_part
3283 = upper_32_bits(afb->address);
3284 chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
3285 plane_state->address.video_progressive.chroma_addr.low_part
3286 = lower_32_bits(chroma_addr);
3287 plane_state->address.video_progressive.chroma_addr.high_part
3288 = upper_32_bits(chroma_addr);
3289 }
3290 }
3291
3292 return 0;
3293 }
3294
dm_plane_helper_cleanup_fb(struct drm_plane * plane,struct drm_plane_state * old_state)3295 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3296 struct drm_plane_state *old_state)
3297 {
3298 struct amdgpu_bo *rbo;
3299 int r;
3300
3301 if (!old_state->fb)
3302 return;
3303
3304 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
3305 r = amdgpu_bo_reserve(rbo, false);
3306 if (unlikely(r)) {
3307 DRM_ERROR("failed to reserve rbo before unpin\n");
3308 return;
3309 }
3310
3311 amdgpu_bo_unpin(rbo);
3312 amdgpu_bo_unreserve(rbo);
3313 amdgpu_bo_unref(&rbo);
3314 }
3315
dm_plane_atomic_check(struct drm_plane * plane,struct drm_plane_state * state)3316 static int dm_plane_atomic_check(struct drm_plane *plane,
3317 struct drm_plane_state *state)
3318 {
3319 struct amdgpu_device *adev = plane->dev->dev_private;
3320 struct dc *dc = adev->dm.dc;
3321 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3322
3323 if (!dm_plane_state->dc_state)
3324 return 0;
3325
3326 if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
3327 return -EINVAL;
3328
3329 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3330 return 0;
3331
3332 return -EINVAL;
3333 }
3334
3335 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
3336 .prepare_fb = dm_plane_helper_prepare_fb,
3337 .cleanup_fb = dm_plane_helper_cleanup_fb,
3338 .atomic_check = dm_plane_atomic_check,
3339 };
3340
3341 /*
3342 * TODO: these are currently initialized to rgb formats only.
3343 * For future use cases we should either initialize them dynamically based on
3344 * plane capabilities, or initialize this array to all formats, so internal drm
3345 * check will succeed, and let DC to implement proper check
3346 */
3347 static const uint32_t rgb_formats[] = {
3348 DRM_FORMAT_RGB888,
3349 DRM_FORMAT_XRGB8888,
3350 DRM_FORMAT_ARGB8888,
3351 DRM_FORMAT_RGBA8888,
3352 DRM_FORMAT_XRGB2101010,
3353 DRM_FORMAT_XBGR2101010,
3354 DRM_FORMAT_ARGB2101010,
3355 DRM_FORMAT_ABGR2101010,
3356 };
3357
3358 static const uint32_t yuv_formats[] = {
3359 DRM_FORMAT_NV12,
3360 DRM_FORMAT_NV21,
3361 };
3362
3363 static const u32 cursor_formats[] = {
3364 DRM_FORMAT_ARGB8888
3365 };
3366
amdgpu_dm_plane_init(struct amdgpu_display_manager * dm,struct amdgpu_plane * aplane,unsigned long possible_crtcs)3367 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3368 struct amdgpu_plane *aplane,
3369 unsigned long possible_crtcs)
3370 {
3371 int res = -EPERM;
3372
3373 switch (aplane->base.type) {
3374 case DRM_PLANE_TYPE_PRIMARY:
3375 res = drm_universal_plane_init(
3376 dm->adev->ddev,
3377 &aplane->base,
3378 possible_crtcs,
3379 &dm_plane_funcs,
3380 rgb_formats,
3381 ARRAY_SIZE(rgb_formats),
3382 NULL, aplane->base.type, NULL);
3383 break;
3384 case DRM_PLANE_TYPE_OVERLAY:
3385 res = drm_universal_plane_init(
3386 dm->adev->ddev,
3387 &aplane->base,
3388 possible_crtcs,
3389 &dm_plane_funcs,
3390 yuv_formats,
3391 ARRAY_SIZE(yuv_formats),
3392 NULL, aplane->base.type, NULL);
3393 break;
3394 case DRM_PLANE_TYPE_CURSOR:
3395 res = drm_universal_plane_init(
3396 dm->adev->ddev,
3397 &aplane->base,
3398 possible_crtcs,
3399 &dm_plane_funcs,
3400 cursor_formats,
3401 ARRAY_SIZE(cursor_formats),
3402 NULL, aplane->base.type, NULL);
3403 break;
3404 }
3405
3406 drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3407
3408 /* Create (reset) the plane state */
3409 if (aplane->base.funcs->reset)
3410 aplane->base.funcs->reset(&aplane->base);
3411
3412
3413 return res;
3414 }
3415
amdgpu_dm_crtc_init(struct amdgpu_display_manager * dm,struct drm_plane * plane,uint32_t crtc_index)3416 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3417 struct drm_plane *plane,
3418 uint32_t crtc_index)
3419 {
3420 struct amdgpu_crtc *acrtc = NULL;
3421 struct amdgpu_plane *cursor_plane;
3422
3423 int res = -ENOMEM;
3424
3425 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3426 if (!cursor_plane)
3427 goto fail;
3428
3429 cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3430 res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3431
3432 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3433 if (!acrtc)
3434 goto fail;
3435
3436 res = drm_crtc_init_with_planes(
3437 dm->ddev,
3438 &acrtc->base,
3439 plane,
3440 &cursor_plane->base,
3441 &amdgpu_dm_crtc_funcs, NULL);
3442
3443 if (res)
3444 goto fail;
3445
3446 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3447
3448 /* Create (reset) the plane state */
3449 if (acrtc->base.funcs->reset)
3450 acrtc->base.funcs->reset(&acrtc->base);
3451
3452 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3453 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3454
3455 acrtc->crtc_id = crtc_index;
3456 acrtc->base.enabled = false;
3457
3458 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3459 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
3460 true, MAX_COLOR_LUT_ENTRIES);
3461 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
3462
3463 return 0;
3464
3465 fail:
3466 kfree(acrtc);
3467 kfree(cursor_plane);
3468 return res;
3469 }
3470
3471
to_drm_connector_type(enum signal_type st)3472 static int to_drm_connector_type(enum signal_type st)
3473 {
3474 switch (st) {
3475 case SIGNAL_TYPE_HDMI_TYPE_A:
3476 return DRM_MODE_CONNECTOR_HDMIA;
3477 case SIGNAL_TYPE_EDP:
3478 return DRM_MODE_CONNECTOR_eDP;
3479 case SIGNAL_TYPE_RGB:
3480 return DRM_MODE_CONNECTOR_VGA;
3481 case SIGNAL_TYPE_DISPLAY_PORT:
3482 case SIGNAL_TYPE_DISPLAY_PORT_MST:
3483 return DRM_MODE_CONNECTOR_DisplayPort;
3484 case SIGNAL_TYPE_DVI_DUAL_LINK:
3485 case SIGNAL_TYPE_DVI_SINGLE_LINK:
3486 return DRM_MODE_CONNECTOR_DVID;
3487 case SIGNAL_TYPE_VIRTUAL:
3488 return DRM_MODE_CONNECTOR_VIRTUAL;
3489
3490 default:
3491 return DRM_MODE_CONNECTOR_Unknown;
3492 }
3493 }
3494
amdgpu_dm_get_native_mode(struct drm_connector * connector)3495 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3496 {
3497 const struct drm_connector_helper_funcs *helper =
3498 connector->helper_private;
3499 struct drm_encoder *encoder;
3500 struct amdgpu_encoder *amdgpu_encoder;
3501
3502 encoder = helper->best_encoder(connector);
3503
3504 if (encoder == NULL)
3505 return;
3506
3507 amdgpu_encoder = to_amdgpu_encoder(encoder);
3508
3509 amdgpu_encoder->native_mode.clock = 0;
3510
3511 if (!list_empty(&connector->probed_modes)) {
3512 struct drm_display_mode *preferred_mode = NULL;
3513
3514 list_for_each_entry(preferred_mode,
3515 &connector->probed_modes,
3516 head) {
3517 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3518 amdgpu_encoder->native_mode = *preferred_mode;
3519
3520 break;
3521 }
3522
3523 }
3524 }
3525
3526 static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder * encoder,char * name,int hdisplay,int vdisplay)3527 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3528 char *name,
3529 int hdisplay, int vdisplay)
3530 {
3531 struct drm_device *dev = encoder->dev;
3532 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3533 struct drm_display_mode *mode = NULL;
3534 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3535
3536 mode = drm_mode_duplicate(dev, native_mode);
3537
3538 if (mode == NULL)
3539 return NULL;
3540
3541 mode->hdisplay = hdisplay;
3542 mode->vdisplay = vdisplay;
3543 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3544 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3545
3546 return mode;
3547
3548 }
3549
amdgpu_dm_connector_add_common_modes(struct drm_encoder * encoder,struct drm_connector * connector)3550 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3551 struct drm_connector *connector)
3552 {
3553 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3554 struct drm_display_mode *mode = NULL;
3555 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3556 struct amdgpu_dm_connector *amdgpu_dm_connector =
3557 to_amdgpu_dm_connector(connector);
3558 int i;
3559 int n;
3560 struct mode_size {
3561 char name[DRM_DISPLAY_MODE_LEN];
3562 int w;
3563 int h;
3564 } common_modes[] = {
3565 { "640x480", 640, 480},
3566 { "800x600", 800, 600},
3567 { "1024x768", 1024, 768},
3568 { "1280x720", 1280, 720},
3569 { "1280x800", 1280, 800},
3570 {"1280x1024", 1280, 1024},
3571 { "1440x900", 1440, 900},
3572 {"1680x1050", 1680, 1050},
3573 {"1600x1200", 1600, 1200},
3574 {"1920x1080", 1920, 1080},
3575 {"1920x1200", 1920, 1200}
3576 };
3577
3578 n = ARRAY_SIZE(common_modes);
3579
3580 for (i = 0; i < n; i++) {
3581 struct drm_display_mode *curmode = NULL;
3582 bool mode_existed = false;
3583
3584 if (common_modes[i].w > native_mode->hdisplay ||
3585 common_modes[i].h > native_mode->vdisplay ||
3586 (common_modes[i].w == native_mode->hdisplay &&
3587 common_modes[i].h == native_mode->vdisplay))
3588 continue;
3589
3590 list_for_each_entry(curmode, &connector->probed_modes, head) {
3591 if (common_modes[i].w == curmode->hdisplay &&
3592 common_modes[i].h == curmode->vdisplay) {
3593 mode_existed = true;
3594 break;
3595 }
3596 }
3597
3598 if (mode_existed)
3599 continue;
3600
3601 mode = amdgpu_dm_create_common_mode(encoder,
3602 common_modes[i].name, common_modes[i].w,
3603 common_modes[i].h);
3604 drm_mode_probed_add(connector, mode);
3605 amdgpu_dm_connector->num_modes++;
3606 }
3607 }
3608
amdgpu_dm_connector_ddc_get_modes(struct drm_connector * connector,struct edid * edid)3609 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3610 struct edid *edid)
3611 {
3612 struct amdgpu_dm_connector *amdgpu_dm_connector =
3613 to_amdgpu_dm_connector(connector);
3614
3615 if (edid) {
3616 /* empty probed_modes */
3617 INIT_LIST_HEAD(&connector->probed_modes);
3618 amdgpu_dm_connector->num_modes =
3619 drm_add_edid_modes(connector, edid);
3620
3621 amdgpu_dm_get_native_mode(connector);
3622 } else {
3623 amdgpu_dm_connector->num_modes = 0;
3624 }
3625 }
3626
amdgpu_dm_connector_get_modes(struct drm_connector * connector)3627 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3628 {
3629 const struct drm_connector_helper_funcs *helper =
3630 connector->helper_private;
3631 struct amdgpu_dm_connector *amdgpu_dm_connector =
3632 to_amdgpu_dm_connector(connector);
3633 struct drm_encoder *encoder;
3634 struct edid *edid = amdgpu_dm_connector->edid;
3635
3636 encoder = helper->best_encoder(connector);
3637
3638 if (!edid || !drm_edid_is_valid(edid)) {
3639 drm_add_modes_noedid(connector, 640, 480);
3640 } else {
3641 amdgpu_dm_connector_ddc_get_modes(connector, edid);
3642 amdgpu_dm_connector_add_common_modes(encoder, connector);
3643 }
3644 amdgpu_dm_fbc_init(connector);
3645
3646 return amdgpu_dm_connector->num_modes;
3647 }
3648
amdgpu_dm_connector_init_helper(struct amdgpu_display_manager * dm,struct amdgpu_dm_connector * aconnector,int connector_type,struct dc_link * link,int link_index)3649 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3650 struct amdgpu_dm_connector *aconnector,
3651 int connector_type,
3652 struct dc_link *link,
3653 int link_index)
3654 {
3655 struct amdgpu_device *adev = dm->ddev->dev_private;
3656
3657 /*
3658 * Some of the properties below require access to state, like bpc.
3659 * Allocate some default initial connector state with our reset helper.
3660 */
3661 if (aconnector->base.funcs->reset)
3662 aconnector->base.funcs->reset(&aconnector->base);
3663
3664 aconnector->connector_id = link_index;
3665 aconnector->dc_link = link;
3666 aconnector->base.interlace_allowed = false;
3667 aconnector->base.doublescan_allowed = false;
3668 aconnector->base.stereo_allowed = false;
3669 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3670 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3671 lockinit(&aconnector->hpd_lock, "agdchpdl", 0, LK_CANRECURSE);
3672
3673 /* configure support HPD hot plug connector_>polled default value is 0
3674 * which means HPD hot plug not supported
3675 */
3676 switch (connector_type) {
3677 case DRM_MODE_CONNECTOR_HDMIA:
3678 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3679 aconnector->base.ycbcr_420_allowed =
3680 link->link_enc->features.ycbcr420_supported ? true : false;
3681 break;
3682 case DRM_MODE_CONNECTOR_DisplayPort:
3683 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3684 aconnector->base.ycbcr_420_allowed =
3685 link->link_enc->features.ycbcr420_supported ? true : false;
3686 break;
3687 case DRM_MODE_CONNECTOR_DVID:
3688 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3689 break;
3690 default:
3691 break;
3692 }
3693
3694 drm_object_attach_property(&aconnector->base.base,
3695 dm->ddev->mode_config.scaling_mode_property,
3696 DRM_MODE_SCALE_NONE);
3697
3698 drm_object_attach_property(&aconnector->base.base,
3699 adev->mode_info.underscan_property,
3700 UNDERSCAN_OFF);
3701 drm_object_attach_property(&aconnector->base.base,
3702 adev->mode_info.underscan_hborder_property,
3703 0);
3704 drm_object_attach_property(&aconnector->base.base,
3705 adev->mode_info.underscan_vborder_property,
3706 0);
3707 drm_object_attach_property(&aconnector->base.base,
3708 adev->mode_info.max_bpc_property,
3709 0);
3710
3711 }
3712
amdgpu_dm_i2c_xfer(struct i2c_adapter * i2c_adap,struct i2c_msg * msgs,int num)3713 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3714 struct i2c_msg *msgs, int num)
3715 {
3716 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3717 struct ddc_service *ddc_service = i2c->ddc_service;
3718 struct i2c_command cmd;
3719 int i;
3720 int result = -EIO;
3721
3722 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3723
3724 if (!cmd.payloads)
3725 return result;
3726
3727 cmd.number_of_payloads = num;
3728 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3729 cmd.speed = 100;
3730
3731 for (i = 0; i < num; i++) {
3732 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3733 cmd.payloads[i].address = msgs[i].addr;
3734 cmd.payloads[i].length = msgs[i].len;
3735 cmd.payloads[i].data = msgs[i].buf;
3736 }
3737
3738 if (dal_i2caux_submit_i2c_command(
3739 ddc_service->ctx->i2caux,
3740 ddc_service->ddc_pin,
3741 &cmd))
3742 result = num;
3743
3744 kfree(cmd.payloads);
3745 return result;
3746 }
3747
amdgpu_dm_i2c_func(struct i2c_adapter * adap)3748 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3749 {
3750 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3751 }
3752
3753 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
3754 .master_xfer = amdgpu_dm_i2c_xfer,
3755 .functionality = amdgpu_dm_i2c_func,
3756 };
3757
3758 static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service * ddc_service,int link_index,int * res)3759 create_i2c(struct ddc_service *ddc_service,
3760 int link_index,
3761 int *res)
3762 {
3763 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3764 struct amdgpu_i2c_adapter *i2c;
3765
3766 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3767 if (!i2c)
3768 return NULL;
3769 #if 0
3770 i2c->base.owner = THIS_MODULE;
3771 i2c->base.class = I2C_CLASS_DDC;
3772 #endif
3773 i2c->base.dev.parent = &adev->pdev->dev;
3774 i2c->base.algo = &amdgpu_dm_i2c_algo;
3775 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3776 i2c_set_adapdata(&i2c->base, i2c);
3777 i2c->ddc_service = ddc_service;
3778
3779 return i2c;
3780 }
3781
3782
3783 /* Note: this function assumes that dc_link_detect() was called for the
3784 * dc_link which will be represented by this aconnector.
3785 */
amdgpu_dm_connector_init(struct amdgpu_display_manager * dm,struct amdgpu_dm_connector * aconnector,uint32_t link_index,struct amdgpu_encoder * aencoder)3786 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3787 struct amdgpu_dm_connector *aconnector,
3788 uint32_t link_index,
3789 struct amdgpu_encoder *aencoder)
3790 {
3791 int res = 0;
3792 int connector_type;
3793 struct dc *dc = dm->dc;
3794 struct dc_link *link = dc_get_link_at_index(dc, link_index);
3795 struct amdgpu_i2c_adapter *i2c;
3796
3797 link->priv = aconnector;
3798
3799 DRM_DEBUG_DRIVER("%s()\n", __func__);
3800
3801 i2c = create_i2c(link->ddc, link->link_index, &res);
3802 if (!i2c) {
3803 DRM_ERROR("Failed to create i2c adapter data\n");
3804 return -ENOMEM;
3805 }
3806
3807 aconnector->i2c = i2c;
3808 res = i2c_add_adapter(&i2c->base);
3809
3810 if (res) {
3811 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3812 goto out_free;
3813 }
3814
3815 connector_type = to_drm_connector_type(link->connector_signal);
3816
3817 res = drm_connector_init(
3818 dm->ddev,
3819 &aconnector->base,
3820 &amdgpu_dm_connector_funcs,
3821 connector_type);
3822
3823 if (res) {
3824 DRM_ERROR("connector_init failed\n");
3825 aconnector->connector_id = -1;
3826 goto out_free;
3827 }
3828
3829 drm_connector_helper_add(
3830 &aconnector->base,
3831 &amdgpu_dm_connector_helper_funcs);
3832
3833 amdgpu_dm_connector_init_helper(
3834 dm,
3835 aconnector,
3836 connector_type,
3837 link,
3838 link_index);
3839
3840 drm_mode_connector_attach_encoder(
3841 &aconnector->base, &aencoder->base);
3842
3843 drm_connector_register(&aconnector->base);
3844 #if defined(CONFIG_DEBUG_FS)
3845 res = connector_debugfs_init(aconnector);
3846 if (res) {
3847 DRM_ERROR("Failed to create debugfs for connector");
3848 goto out_free;
3849 }
3850 #endif
3851
3852 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3853 || connector_type == DRM_MODE_CONNECTOR_eDP)
3854 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3855
3856 out_free:
3857 if (res) {
3858 kfree(i2c);
3859 aconnector->i2c = NULL;
3860 }
3861 return res;
3862 }
3863
amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device * adev)3864 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3865 {
3866 switch (adev->mode_info.num_crtc) {
3867 case 1:
3868 return 0x1;
3869 case 2:
3870 return 0x3;
3871 case 3:
3872 return 0x7;
3873 case 4:
3874 return 0xf;
3875 case 5:
3876 return 0x1f;
3877 case 6:
3878 default:
3879 return 0x3f;
3880 }
3881 }
3882
amdgpu_dm_encoder_init(struct drm_device * dev,struct amdgpu_encoder * aencoder,uint32_t link_index)3883 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3884 struct amdgpu_encoder *aencoder,
3885 uint32_t link_index)
3886 {
3887 struct amdgpu_device *adev = dev->dev_private;
3888
3889 int res = drm_encoder_init(dev,
3890 &aencoder->base,
3891 &amdgpu_dm_encoder_funcs,
3892 DRM_MODE_ENCODER_TMDS,
3893 NULL);
3894
3895 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3896
3897 if (!res)
3898 aencoder->encoder_id = link_index;
3899 else
3900 aencoder->encoder_id = -1;
3901
3902 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3903
3904 return res;
3905 }
3906
manage_dm_interrupts(struct amdgpu_device * adev,struct amdgpu_crtc * acrtc,bool enable)3907 static void manage_dm_interrupts(struct amdgpu_device *adev,
3908 struct amdgpu_crtc *acrtc,
3909 bool enable)
3910 {
3911 /*
3912 * this is not correct translation but will work as soon as VBLANK
3913 * constant is the same as PFLIP
3914 */
3915 int irq_type =
3916 amdgpu_display_crtc_idx_to_irq_type(
3917 adev,
3918 acrtc->crtc_id);
3919
3920 if (enable) {
3921 drm_crtc_vblank_on(&acrtc->base);
3922 amdgpu_irq_get(
3923 adev,
3924 &adev->pageflip_irq,
3925 irq_type);
3926 } else {
3927
3928 amdgpu_irq_put(
3929 adev,
3930 &adev->pageflip_irq,
3931 irq_type);
3932 drm_crtc_vblank_off(&acrtc->base);
3933 }
3934 }
3935
3936 static bool
is_scaling_state_different(const struct dm_connector_state * dm_state,const struct dm_connector_state * old_dm_state)3937 is_scaling_state_different(const struct dm_connector_state *dm_state,
3938 const struct dm_connector_state *old_dm_state)
3939 {
3940 if (dm_state->scaling != old_dm_state->scaling)
3941 return true;
3942 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3943 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3944 return true;
3945 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3946 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3947 return true;
3948 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3949 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3950 return true;
3951 return false;
3952 }
3953
remove_stream(struct amdgpu_device * adev,struct amdgpu_crtc * acrtc,struct dc_stream_state * stream)3954 static void remove_stream(struct amdgpu_device *adev,
3955 struct amdgpu_crtc *acrtc,
3956 struct dc_stream_state *stream)
3957 {
3958 /* this is the update mode case */
3959 if (adev->dm.freesync_module)
3960 mod_freesync_remove_stream(adev->dm.freesync_module, stream);
3961
3962 acrtc->otg_inst = -1;
3963 acrtc->enabled = false;
3964 }
3965
get_cursor_position(struct drm_plane * plane,struct drm_crtc * crtc,struct dc_cursor_position * position)3966 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3967 struct dc_cursor_position *position)
3968 {
3969 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3970 int x, y;
3971 int xorigin = 0, yorigin = 0;
3972
3973 if (!crtc || !plane->state->fb) {
3974 position->enable = false;
3975 position->x = 0;
3976 position->y = 0;
3977 return 0;
3978 }
3979
3980 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3981 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3982 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3983 __func__,
3984 plane->state->crtc_w,
3985 plane->state->crtc_h);
3986 return -EINVAL;
3987 }
3988
3989 x = plane->state->crtc_x;
3990 y = plane->state->crtc_y;
3991 /* avivo cursor are offset into the total surface */
3992 x += crtc->primary->state->src_x >> 16;
3993 y += crtc->primary->state->src_y >> 16;
3994 if (x < 0) {
3995 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3996 x = 0;
3997 }
3998 if (y < 0) {
3999 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4000 y = 0;
4001 }
4002 position->enable = true;
4003 position->x = x;
4004 position->y = y;
4005 position->x_hotspot = xorigin;
4006 position->y_hotspot = yorigin;
4007
4008 return 0;
4009 }
4010
handle_cursor_update(struct drm_plane * plane,struct drm_plane_state * old_plane_state)4011 static void handle_cursor_update(struct drm_plane *plane,
4012 struct drm_plane_state *old_plane_state)
4013 {
4014 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
4015 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
4016 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
4017 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4018 uint64_t address = afb ? afb->address : 0;
4019 struct dc_cursor_position position;
4020 struct dc_cursor_attributes attributes;
4021 int ret;
4022
4023 if (!plane->state->fb && !old_plane_state->fb)
4024 return;
4025
4026 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
4027 __func__,
4028 amdgpu_crtc->crtc_id,
4029 plane->state->crtc_w,
4030 plane->state->crtc_h);
4031
4032 ret = get_cursor_position(plane, crtc, &position);
4033 if (ret)
4034 return;
4035
4036 if (!position.enable) {
4037 /* turn off cursor */
4038 if (crtc_state && crtc_state->stream)
4039 dc_stream_set_cursor_position(crtc_state->stream,
4040 &position);
4041 return;
4042 }
4043
4044 amdgpu_crtc->cursor_width = plane->state->crtc_w;
4045 amdgpu_crtc->cursor_height = plane->state->crtc_h;
4046
4047 memset(&attributes, 0, sizeof(attributes));
4048 attributes.address.high_part = upper_32_bits(address);
4049 attributes.address.low_part = lower_32_bits(address);
4050 attributes.width = plane->state->crtc_w;
4051 attributes.height = plane->state->crtc_h;
4052 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
4053 attributes.rotation_angle = 0;
4054 attributes.attribute_flags.value = 0;
4055
4056 attributes.pitch = attributes.width;
4057
4058 if (crtc_state->stream) {
4059 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
4060 &attributes))
4061 DRM_ERROR("DC failed to set cursor attributes\n");
4062
4063 if (!dc_stream_set_cursor_position(crtc_state->stream,
4064 &position))
4065 DRM_ERROR("DC failed to set cursor position\n");
4066 }
4067 }
4068
prepare_flip_isr(struct amdgpu_crtc * acrtc)4069 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
4070 {
4071
4072 assert_spin_locked(&acrtc->base.dev->event_lock);
4073 WARN_ON(acrtc->event);
4074
4075 acrtc->event = acrtc->base.state->event;
4076
4077 /* Set the flip status */
4078 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
4079
4080 /* Mark this event as consumed */
4081 acrtc->base.state->event = NULL;
4082
4083 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4084 acrtc->crtc_id);
4085 }
4086
4087 /*
4088 * Executes flip
4089 *
4090 * Waits on all BO's fences and for proper vblank count
4091 */
amdgpu_dm_do_flip(struct drm_crtc * crtc,struct drm_framebuffer * fb,uint32_t target,struct dc_state * state)4092 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
4093 struct drm_framebuffer *fb,
4094 uint32_t target,
4095 struct dc_state *state)
4096 {
4097 unsigned long flags;
4098 uint32_t target_vblank;
4099 int r, vpos, hpos;
4100 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4101 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
4102 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
4103 struct amdgpu_device *adev = crtc->dev->dev_private;
4104 bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
4105 struct dc_flip_addrs addr = { {0} };
4106 /* TODO eliminate or rename surface_update */
4107 struct dc_surface_update surface_updates[1] = { {0} };
4108 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4109
4110
4111 /* Prepare wait for target vblank early - before the fence-waits */
4112 target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
4113 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
4114
4115 /* TODO This might fail and hence better not used, wait
4116 * explicitly on fences instead
4117 * and in general should be called for
4118 * blocking commit to as per framework helpers
4119 */
4120 r = amdgpu_bo_reserve(abo, true);
4121 if (unlikely(r != 0)) {
4122 DRM_ERROR("failed to reserve buffer before flip\n");
4123 WARN_ON(1);
4124 }
4125
4126 /* Wait for all fences on this FB */
4127 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
4128 MAX_SCHEDULE_TIMEOUT) < 0);
4129
4130 amdgpu_bo_unreserve(abo);
4131
4132 /* Wait until we're out of the vertical blank period before the one
4133 * targeted by the flip
4134 */
4135 while ((acrtc->enabled &&
4136 (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
4137 0, &vpos, &hpos, NULL,
4138 NULL, &crtc->hwmode)
4139 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
4140 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
4141 (int)(target_vblank -
4142 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
4143 usleep_range(1000, 1100);
4144 }
4145
4146 /* Flip */
4147 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4148
4149 WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
4150 WARN_ON(!acrtc_state->stream);
4151
4152 addr.address.grph.addr.low_part = lower_32_bits(afb->address);
4153 addr.address.grph.addr.high_part = upper_32_bits(afb->address);
4154 addr.flip_immediate = async_flip;
4155
4156
4157 if (acrtc->base.state->event)
4158 prepare_flip_isr(acrtc);
4159
4160 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4161
4162 surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
4163 surface_updates->flip_addr = &addr;
4164
4165 dc_commit_updates_for_stream(adev->dm.dc,
4166 surface_updates,
4167 1,
4168 acrtc_state->stream,
4169 NULL,
4170 &surface_updates->surface,
4171 state);
4172
4173 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
4174 __func__,
4175 addr.address.grph.addr.high_part,
4176 addr.address.grph.addr.low_part);
4177 }
4178
4179 /*
4180 * TODO this whole function needs to go
4181 *
4182 * dc_surface_update is needlessly complex. See if we can just replace this
4183 * with a dc_plane_state and follow the atomic model a bit more closely here.
4184 */
commit_planes_to_stream(struct dc * dc,struct dc_plane_state ** plane_states,uint8_t new_plane_count,struct dm_crtc_state * dm_new_crtc_state,struct dm_crtc_state * dm_old_crtc_state,struct dc_state * state)4185 static bool commit_planes_to_stream(
4186 struct dc *dc,
4187 struct dc_plane_state **plane_states,
4188 uint8_t new_plane_count,
4189 struct dm_crtc_state *dm_new_crtc_state,
4190 struct dm_crtc_state *dm_old_crtc_state,
4191 struct dc_state *state)
4192 {
4193 /* no need to dynamically allocate this. it's pretty small */
4194 struct dc_surface_update updates[MAX_SURFACES];
4195 struct dc_flip_addrs *flip_addr;
4196 struct dc_plane_info *plane_info;
4197 struct dc_scaling_info *scaling_info;
4198 int i;
4199 struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
4200 struct dc_stream_update *stream_update =
4201 kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
4202
4203 if (!stream_update) {
4204 BREAK_TO_DEBUGGER();
4205 return false;
4206 }
4207
4208 flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
4209 GFP_KERNEL);
4210 plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
4211 GFP_KERNEL);
4212 scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
4213 GFP_KERNEL);
4214
4215 if (!flip_addr || !plane_info || !scaling_info) {
4216 kfree(flip_addr);
4217 kfree(plane_info);
4218 kfree(scaling_info);
4219 kfree(stream_update);
4220 return false;
4221 }
4222
4223 memset(updates, 0, sizeof(updates));
4224
4225 stream_update->src = dc_stream->src;
4226 stream_update->dst = dc_stream->dst;
4227 stream_update->out_transfer_func = dc_stream->out_transfer_func;
4228
4229 for (i = 0; i < new_plane_count; i++) {
4230 updates[i].surface = plane_states[i];
4231 updates[i].gamma =
4232 (struct dc_gamma *)plane_states[i]->gamma_correction;
4233 updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
4234 flip_addr[i].address = plane_states[i]->address;
4235 flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
4236 plane_info[i].color_space = plane_states[i]->color_space;
4237 plane_info[i].format = plane_states[i]->format;
4238 plane_info[i].plane_size = plane_states[i]->plane_size;
4239 plane_info[i].rotation = plane_states[i]->rotation;
4240 plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
4241 plane_info[i].stereo_format = plane_states[i]->stereo_format;
4242 plane_info[i].tiling_info = plane_states[i]->tiling_info;
4243 plane_info[i].visible = plane_states[i]->visible;
4244 plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
4245 plane_info[i].dcc = plane_states[i]->dcc;
4246 scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
4247 scaling_info[i].src_rect = plane_states[i]->src_rect;
4248 scaling_info[i].dst_rect = plane_states[i]->dst_rect;
4249 scaling_info[i].clip_rect = plane_states[i]->clip_rect;
4250
4251 updates[i].flip_addr = &flip_addr[i];
4252 updates[i].plane_info = &plane_info[i];
4253 updates[i].scaling_info = &scaling_info[i];
4254 }
4255
4256 dc_commit_updates_for_stream(
4257 dc,
4258 updates,
4259 new_plane_count,
4260 dc_stream, stream_update, plane_states, state);
4261
4262 kfree(flip_addr);
4263 kfree(plane_info);
4264 kfree(scaling_info);
4265 kfree(stream_update);
4266 return true;
4267 }
4268
amdgpu_dm_commit_planes(struct drm_atomic_state * state,struct drm_device * dev,struct amdgpu_display_manager * dm,struct drm_crtc * pcrtc,bool * wait_for_vblank)4269 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
4270 struct drm_device *dev,
4271 struct amdgpu_display_manager *dm,
4272 struct drm_crtc *pcrtc,
4273 bool *wait_for_vblank)
4274 {
4275 uint32_t i;
4276 struct drm_plane *plane;
4277 struct drm_plane_state *old_plane_state, *new_plane_state;
4278 struct dc_stream_state *dc_stream_attach;
4279 struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
4280 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
4281 struct drm_crtc_state *new_pcrtc_state =
4282 drm_atomic_get_new_crtc_state(state, pcrtc);
4283 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
4284 struct dm_crtc_state *dm_old_crtc_state =
4285 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
4286 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4287 int planes_count = 0;
4288 unsigned long flags;
4289
4290 /* update planes when needed */
4291 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4292 struct drm_crtc *crtc = new_plane_state->crtc;
4293 struct drm_crtc_state *new_crtc_state;
4294 struct drm_framebuffer *fb = new_plane_state->fb;
4295 bool pflip_needed;
4296 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
4297
4298 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
4299 handle_cursor_update(plane, old_plane_state);
4300 continue;
4301 }
4302
4303 if (!fb || !crtc || pcrtc != crtc)
4304 continue;
4305
4306 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4307 if (!new_crtc_state->active)
4308 continue;
4309
4310 pflip_needed = !state->allow_modeset;
4311
4312 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4313 if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
4314 DRM_ERROR("%s: acrtc %d, already busy\n",
4315 __func__,
4316 acrtc_attach->crtc_id);
4317 /* In commit tail framework this cannot happen */
4318 WARN_ON(1);
4319 }
4320 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4321
4322 if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
4323 WARN_ON(!dm_new_plane_state->dc_state);
4324
4325 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
4326
4327 dc_stream_attach = acrtc_state->stream;
4328 planes_count++;
4329
4330 } else if (new_crtc_state->planes_changed) {
4331 /* Assume even ONE crtc with immediate flip means
4332 * entire can't wait for VBLANK
4333 * TODO Check if it's correct
4334 */
4335 *wait_for_vblank =
4336 new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
4337 false : true;
4338
4339 /* TODO: Needs rework for multiplane flip */
4340 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
4341 drm_crtc_vblank_get(crtc);
4342
4343 amdgpu_dm_do_flip(
4344 crtc,
4345 fb,
4346 (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
4347 dm_state->context);
4348 }
4349
4350 }
4351
4352 if (planes_count) {
4353 unsigned long flags;
4354
4355 if (new_pcrtc_state->event) {
4356
4357 drm_crtc_vblank_get(pcrtc);
4358
4359 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
4360 prepare_flip_isr(acrtc_attach);
4361 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
4362 }
4363
4364
4365 if (false == commit_planes_to_stream(dm->dc,
4366 plane_states_constructed,
4367 planes_count,
4368 acrtc_state,
4369 dm_old_crtc_state,
4370 dm_state->context))
4371 dm_error("%s: Failed to attach plane!\n", __func__);
4372 } else {
4373 /*TODO BUG Here should go disable planes on CRTC. */
4374 }
4375 }
4376
4377 /**
4378 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
4379 * @crtc_state: the DRM CRTC state
4380 * @stream_state: the DC stream state.
4381 *
4382 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
4383 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
4384 */
amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state * crtc_state,struct dc_stream_state * stream_state)4385 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
4386 struct dc_stream_state *stream_state)
4387 {
4388 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
4389 }
4390
amdgpu_dm_atomic_commit(struct drm_device * dev,struct drm_atomic_state * state,bool nonblock)4391 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4392 struct drm_atomic_state *state,
4393 bool nonblock)
4394 {
4395 struct drm_crtc *crtc;
4396 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4397 struct amdgpu_device *adev = dev->dev_private;
4398 int i;
4399
4400 /*
4401 * We evade vblanks and pflips on crtc that
4402 * should be changed. We do it here to flush & disable
4403 * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4404 * it will update crtc->dm_crtc_state->stream pointer which is used in
4405 * the ISRs.
4406 */
4407 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4408 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4409 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4410 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4411
4412 if (drm_atomic_crtc_needs_modeset(new_crtc_state)
4413 && dm_old_crtc_state->stream) {
4414 /*
4415 * CRC capture was enabled but not disabled.
4416 * Release the vblank reference.
4417 */
4418 if (dm_new_crtc_state->crc_enabled) {
4419 drm_crtc_vblank_put(crtc);
4420 dm_new_crtc_state->crc_enabled = false;
4421 }
4422
4423 manage_dm_interrupts(adev, acrtc, false);
4424 }
4425 }
4426 /* Add check here for SoC's that support hardware cursor plane, to
4427 * unset legacy_cursor_update */
4428
4429 return drm_atomic_helper_commit(dev, state, nonblock);
4430
4431 /*TODO Handle EINTR, reenable IRQ*/
4432 }
4433
amdgpu_dm_atomic_commit_tail(struct drm_atomic_state * state)4434 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4435 {
4436 struct drm_device *dev = state->dev;
4437 struct amdgpu_device *adev = dev->dev_private;
4438 struct amdgpu_display_manager *dm = &adev->dm;
4439 struct dm_atomic_state *dm_state;
4440 uint32_t i, j;
4441 struct drm_crtc *crtc;
4442 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4443 unsigned long flags;
4444 bool wait_for_vblank = true;
4445 struct drm_connector *connector;
4446 struct drm_connector_state *old_con_state, *new_con_state;
4447 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4448 int crtc_disable_count = 0;
4449
4450 drm_atomic_helper_update_legacy_modeset_state(dev, state);
4451
4452 dm_state = to_dm_atomic_state(state);
4453
4454 /* update changed items */
4455 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4456 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4457
4458 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4459 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4460
4461 DRM_DEBUG_DRIVER(
4462 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4463 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4464 "connectors_changed:%d\n",
4465 acrtc->crtc_id,
4466 new_crtc_state->enable,
4467 new_crtc_state->active,
4468 new_crtc_state->planes_changed,
4469 new_crtc_state->mode_changed,
4470 new_crtc_state->active_changed,
4471 new_crtc_state->connectors_changed);
4472
4473 /* Copy all transient state flags into dc state */
4474 if (dm_new_crtc_state->stream) {
4475 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
4476 dm_new_crtc_state->stream);
4477 }
4478
4479 /* handles headless hotplug case, updating new_state and
4480 * aconnector as needed
4481 */
4482
4483 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4484
4485 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4486
4487 if (!dm_new_crtc_state->stream) {
4488 /*
4489 * this could happen because of issues with
4490 * userspace notifications delivery.
4491 * In this case userspace tries to set mode on
4492 * display which is disconnect in fact.
4493 * dc_sink in NULL in this case on aconnector.
4494 * We expect reset mode will come soon.
4495 *
4496 * This can also happen when unplug is done
4497 * during resume sequence ended
4498 *
4499 * In this case, we want to pretend we still
4500 * have a sink to keep the pipe running so that
4501 * hw state is consistent with the sw state
4502 */
4503 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4504 __func__, acrtc->base.base.id);
4505 continue;
4506 }
4507
4508 if (dm_old_crtc_state->stream)
4509 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4510 #if 0
4511 pm_runtime_get_noresume(dev->dev);
4512 #endif
4513
4514 acrtc->enabled = true;
4515 acrtc->hw_mode = new_crtc_state->mode;
4516 crtc->hwmode = new_crtc_state->mode;
4517 } else if (modereset_required(new_crtc_state)) {
4518 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4519
4520 /* i.e. reset mode */
4521 if (dm_old_crtc_state->stream)
4522 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4523 }
4524 } /* for_each_crtc_in_state() */
4525
4526 /*
4527 * Add streams after required streams from new and replaced streams
4528 * are removed from freesync module
4529 */
4530 if (adev->dm.freesync_module) {
4531 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4532 new_crtc_state, i) {
4533 struct amdgpu_dm_connector *aconnector = NULL;
4534 struct dm_connector_state *dm_new_con_state = NULL;
4535 struct amdgpu_crtc *acrtc = NULL;
4536 bool modeset_needed;
4537
4538 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4539 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4540 modeset_needed = modeset_required(
4541 new_crtc_state,
4542 dm_new_crtc_state->stream,
4543 dm_old_crtc_state->stream);
4544 /* We add stream to freesync if:
4545 * 1. Said stream is not null, and
4546 * 2. A modeset is requested. This means that the
4547 * stream was removed previously, and needs to be
4548 * replaced.
4549 */
4550 if (dm_new_crtc_state->stream == NULL ||
4551 !modeset_needed)
4552 continue;
4553
4554 acrtc = to_amdgpu_crtc(crtc);
4555
4556 aconnector =
4557 amdgpu_dm_find_first_crtc_matching_connector(
4558 state, crtc);
4559 if (!aconnector) {
4560 DRM_DEBUG_DRIVER("Atomic commit: Failed to "
4561 "find connector for acrtc "
4562 "id:%d skipping freesync "
4563 "init\n",
4564 acrtc->crtc_id);
4565 continue;
4566 }
4567
4568 mod_freesync_add_stream(adev->dm.freesync_module,
4569 dm_new_crtc_state->stream,
4570 &aconnector->caps);
4571 new_con_state = drm_atomic_get_new_connector_state(
4572 state, &aconnector->base);
4573 dm_new_con_state = to_dm_connector_state(new_con_state);
4574
4575 mod_freesync_set_user_enable(adev->dm.freesync_module,
4576 &dm_new_crtc_state->stream,
4577 1,
4578 &dm_new_con_state->user_enable);
4579 }
4580 }
4581
4582 if (dm_state->context) {
4583 dm_enable_per_frame_crtc_master_sync(dm_state->context);
4584 WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4585 }
4586
4587 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4588 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4589
4590 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4591
4592 if (dm_new_crtc_state->stream != NULL) {
4593 const struct dc_stream_status *status =
4594 dc_stream_get_status(dm_new_crtc_state->stream);
4595
4596 if (!status)
4597 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4598 else
4599 acrtc->otg_inst = status->primary_otg_inst;
4600 }
4601 }
4602
4603 /* Handle scaling and underscan changes*/
4604 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4605 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4606 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4607 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4608 struct dc_stream_status *status = NULL;
4609
4610 if (acrtc) {
4611 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4612 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
4613 }
4614
4615 /* Skip any modesets/resets */
4616 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4617 continue;
4618
4619 /* Skip any thing not scale or underscan changes */
4620 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4621 continue;
4622
4623 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4624
4625 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4626 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4627
4628 if (!dm_new_crtc_state->stream)
4629 continue;
4630
4631 status = dc_stream_get_status(dm_new_crtc_state->stream);
4632 WARN_ON(!status);
4633 WARN_ON(!status->plane_count);
4634
4635 /*TODO How it works with MPO ?*/
4636 if (!commit_planes_to_stream(
4637 dm->dc,
4638 status->plane_states,
4639 status->plane_count,
4640 dm_new_crtc_state,
4641 to_dm_crtc_state(old_crtc_state),
4642 dm_state->context))
4643 dm_error("%s: Failed to update stream scaling!\n", __func__);
4644 }
4645
4646 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
4647 new_crtc_state, i) {
4648 /*
4649 * loop to enable interrupts on newly arrived crtc
4650 */
4651 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4652 bool modeset_needed;
4653
4654 if (old_crtc_state->active && !new_crtc_state->active)
4655 crtc_disable_count++;
4656
4657 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4658 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4659 modeset_needed = modeset_required(
4660 new_crtc_state,
4661 dm_new_crtc_state->stream,
4662 dm_old_crtc_state->stream);
4663
4664 if (dm_new_crtc_state->stream == NULL || !modeset_needed)
4665 continue;
4666
4667 if (adev->dm.freesync_module)
4668 mod_freesync_notify_mode_change(
4669 adev->dm.freesync_module,
4670 &dm_new_crtc_state->stream, 1);
4671
4672 manage_dm_interrupts(adev, acrtc, true);
4673 }
4674
4675 /* update planes when needed per crtc*/
4676 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4677 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4678
4679 if (dm_new_crtc_state->stream)
4680 amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4681 }
4682
4683
4684 /*
4685 * send vblank event on all events not handled in flip and
4686 * mark consumed event for drm_atomic_helper_commit_hw_done
4687 */
4688 spin_lock_irqsave(&adev->ddev->event_lock, flags);
4689 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4690
4691 if (new_crtc_state->event)
4692 drm_send_event_locked(dev, &new_crtc_state->event->base);
4693
4694 new_crtc_state->event = NULL;
4695 }
4696 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4697
4698
4699 if (wait_for_vblank)
4700 drm_atomic_helper_wait_for_flip_done(dev, state);
4701
4702 /*
4703 * FIXME:
4704 * Delay hw_done() until flip_done() is signaled. This is to block
4705 * another commit from freeing the CRTC state while we're still
4706 * waiting on flip_done.
4707 */
4708 drm_atomic_helper_commit_hw_done(state);
4709
4710 drm_atomic_helper_cleanup_planes(dev, state);
4711
4712 /* Finally, drop a runtime PM reference for each newly disabled CRTC,
4713 * so we can put the GPU into runtime suspend if we're not driving any
4714 * displays anymore
4715 */
4716 for (i = 0; i < crtc_disable_count; i++)
4717 pm_runtime_put_autosuspend(dev->dev);
4718 pm_runtime_mark_last_busy(dev->dev);
4719 }
4720
4721
dm_force_atomic_commit(struct drm_connector * connector)4722 static int dm_force_atomic_commit(struct drm_connector *connector)
4723 {
4724 int ret = 0;
4725 struct drm_device *ddev = connector->dev;
4726 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4727 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4728 struct drm_plane *plane = disconnected_acrtc->base.primary;
4729 struct drm_connector_state *conn_state;
4730 struct drm_crtc_state *crtc_state;
4731 struct drm_plane_state *plane_state;
4732
4733 if (!state)
4734 return -ENOMEM;
4735
4736 state->acquire_ctx = ddev->mode_config.acquire_ctx;
4737
4738 /* Construct an atomic state to restore previous display setting */
4739
4740 /*
4741 * Attach connectors to drm_atomic_state
4742 */
4743 conn_state = drm_atomic_get_connector_state(state, connector);
4744
4745 ret = PTR_ERR_OR_ZERO(conn_state);
4746 if (ret)
4747 goto out;
4748
4749 /* Attach crtc to drm_atomic_state*/
4750 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4751
4752 ret = PTR_ERR_OR_ZERO(crtc_state);
4753 if (ret)
4754 goto out;
4755
4756 /* force a restore */
4757 crtc_state->mode_changed = true;
4758
4759 /* Attach plane to drm_atomic_state */
4760 plane_state = drm_atomic_get_plane_state(state, plane);
4761
4762 ret = PTR_ERR_OR_ZERO(plane_state);
4763 if (ret)
4764 goto out;
4765
4766 /* Call commit internally with the state we just constructed */
4767 ret = drm_atomic_commit(state);
4768
4769 out:
4770 drm_atomic_state_put(state);
4771 if (ret)
4772 DRM_ERROR("Restoring old state failed with %i\n", ret);
4773
4774 return ret;
4775 }
4776
4777 /*
4778 * This functions handle all cases when set mode does not come upon hotplug.
4779 * This include when the same display is unplugged then plugged back into the
4780 * same port and when we are running without usermode desktop manager supprot
4781 */
dm_restore_drm_connector_state(struct drm_device * dev,struct drm_connector * connector)4782 void dm_restore_drm_connector_state(struct drm_device *dev,
4783 struct drm_connector *connector)
4784 {
4785 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4786 struct amdgpu_crtc *disconnected_acrtc;
4787 struct dm_crtc_state *acrtc_state;
4788
4789 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4790 return;
4791
4792 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4793 if (!disconnected_acrtc)
4794 return;
4795
4796 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4797 if (!acrtc_state->stream)
4798 return;
4799
4800 /*
4801 * If the previous sink is not released and different from the current,
4802 * we deduce we are in a state where we can not rely on usermode call
4803 * to turn on the display, so we do it here
4804 */
4805 if (acrtc_state->stream->sink != aconnector->dc_sink)
4806 dm_force_atomic_commit(&aconnector->base);
4807 }
4808
4809 /*`
4810 * Grabs all modesetting locks to serialize against any blocking commits,
4811 * Waits for completion of all non blocking commits.
4812 */
do_aquire_global_lock(struct drm_device * dev,struct drm_atomic_state * state)4813 static int do_aquire_global_lock(struct drm_device *dev,
4814 struct drm_atomic_state *state)
4815 {
4816 struct drm_crtc *crtc;
4817 struct drm_crtc_commit *commit;
4818 long ret;
4819
4820 /* Adding all modeset locks to aquire_ctx will
4821 * ensure that when the framework release it the
4822 * extra locks we are locking here will get released to
4823 */
4824 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
4825 if (ret)
4826 return ret;
4827
4828 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4829 lockmgr(&crtc->commit_lock, LK_EXCLUSIVE);
4830 commit = list_first_entry_or_null(&crtc->commit_list,
4831 struct drm_crtc_commit, commit_entry);
4832 if (commit)
4833 drm_crtc_commit_get(commit);
4834 lockmgr(&crtc->commit_lock, LK_RELEASE);
4835
4836 if (!commit)
4837 continue;
4838
4839 /* Make sure all pending HW programming completed and
4840 * page flips done
4841 */
4842 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
4843
4844 if (ret > 0)
4845 ret = wait_for_completion_interruptible_timeout(
4846 &commit->flip_done, 10*HZ);
4847
4848 if (ret == 0)
4849 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
4850 "timed out\n", crtc->base.id, crtc->name);
4851
4852 drm_crtc_commit_put(commit);
4853 }
4854
4855 return ret < 0 ? ret : 0;
4856 }
4857
dm_update_crtcs_state(struct dc * dc,struct drm_atomic_state * state,bool enable,bool * lock_and_validation_needed)4858 static int dm_update_crtcs_state(struct dc *dc,
4859 struct drm_atomic_state *state,
4860 bool enable,
4861 bool *lock_and_validation_needed)
4862 {
4863 struct drm_crtc *crtc;
4864 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4865 int i;
4866 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4867 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4868 struct dc_stream_state *new_stream;
4869 int ret = 0;
4870
4871 /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
4872 /* update changed items */
4873 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4874 struct amdgpu_crtc *acrtc = NULL;
4875 struct amdgpu_dm_connector *aconnector = NULL;
4876 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
4877 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
4878 struct drm_plane_state *new_plane_state = NULL;
4879
4880 new_stream = NULL;
4881
4882 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4883 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4884 acrtc = to_amdgpu_crtc(crtc);
4885
4886 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
4887
4888 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
4889 ret = -EINVAL;
4890 goto fail;
4891 }
4892
4893 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4894
4895 /* TODO This hack should go away */
4896 if (aconnector && enable) {
4897 // Make sure fake sink is created in plug-in scenario
4898 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
4899 &aconnector->base);
4900 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
4901 &aconnector->base);
4902
4903 if (IS_ERR(drm_new_conn_state)) {
4904 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
4905 break;
4906 }
4907
4908 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
4909 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
4910
4911 new_stream = create_stream_for_sink(aconnector,
4912 &new_crtc_state->mode,
4913 dm_new_conn_state);
4914
4915 /*
4916 * we can have no stream on ACTION_SET if a display
4917 * was disconnected during S3, in this case it not and
4918 * error, the OS will be updated after detection, and
4919 * do the right thing on next atomic commit
4920 */
4921
4922 if (!new_stream) {
4923 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4924 __func__, acrtc->base.base.id);
4925 break;
4926 }
4927
4928 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4929 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
4930 new_crtc_state->mode_changed = false;
4931 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
4932 new_crtc_state->mode_changed);
4933 }
4934 }
4935
4936 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4937 goto next_crtc;
4938
4939 DRM_DEBUG_DRIVER(
4940 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4941 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4942 "connectors_changed:%d\n",
4943 acrtc->crtc_id,
4944 new_crtc_state->enable,
4945 new_crtc_state->active,
4946 new_crtc_state->planes_changed,
4947 new_crtc_state->mode_changed,
4948 new_crtc_state->active_changed,
4949 new_crtc_state->connectors_changed);
4950
4951 /* Remove stream for any changed/disabled CRTC */
4952 if (!enable) {
4953
4954 if (!dm_old_crtc_state->stream)
4955 goto next_crtc;
4956
4957 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
4958 crtc->base.id);
4959
4960 /* i.e. reset mode */
4961 if (dc_remove_stream_from_ctx(
4962 dc,
4963 dm_state->context,
4964 dm_old_crtc_state->stream) != DC_OK) {
4965 ret = -EINVAL;
4966 goto fail;
4967 }
4968
4969 dc_stream_release(dm_old_crtc_state->stream);
4970 dm_new_crtc_state->stream = NULL;
4971
4972 *lock_and_validation_needed = true;
4973
4974 } else {/* Add stream for any updated/enabled CRTC */
4975 /*
4976 * Quick fix to prevent NULL pointer on new_stream when
4977 * added MST connectors not found in existing crtc_state in the chained mode
4978 * TODO: need to dig out the root cause of that
4979 */
4980 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
4981 goto next_crtc;
4982
4983 if (modereset_required(new_crtc_state))
4984 goto next_crtc;
4985
4986 if (modeset_required(new_crtc_state, new_stream,
4987 dm_old_crtc_state->stream)) {
4988
4989 WARN_ON(dm_new_crtc_state->stream);
4990
4991 dm_new_crtc_state->stream = new_stream;
4992
4993 dc_stream_retain(new_stream);
4994
4995 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
4996 crtc->base.id);
4997
4998 if (dc_add_stream_to_ctx(
4999 dc,
5000 dm_state->context,
5001 dm_new_crtc_state->stream) != DC_OK) {
5002 ret = -EINVAL;
5003 goto fail;
5004 }
5005
5006 *lock_and_validation_needed = true;
5007 }
5008 }
5009
5010 next_crtc:
5011 /* Release extra reference */
5012 if (new_stream)
5013 dc_stream_release(new_stream);
5014
5015 /*
5016 * We want to do dc stream updates that do not require a
5017 * full modeset below.
5018 */
5019 if (!(enable && aconnector && new_crtc_state->enable &&
5020 new_crtc_state->active))
5021 continue;
5022 /*
5023 * Given above conditions, the dc state cannot be NULL because:
5024 * 1. We're in the process of enabling CRTCs (just been added
5025 * to the dc context, or already is on the context)
5026 * 2. Has a valid connector attached, and
5027 * 3. Is currently active and enabled.
5028 * => The dc stream state currently exists.
5029 */
5030 BUG_ON(dm_new_crtc_state->stream == NULL);
5031
5032 /* Scaling or underscan settings */
5033 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
5034 update_stream_scaling_settings(
5035 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
5036
5037 /*
5038 * Color management settings. We also update color properties
5039 * when a modeset is needed, to ensure it gets reprogrammed.
5040 */
5041 if (dm_new_crtc_state->base.color_mgmt_changed ||
5042 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
5043 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
5044 if (ret)
5045 goto fail;
5046 amdgpu_dm_set_ctm(dm_new_crtc_state);
5047 }
5048 }
5049
5050 return ret;
5051
5052 fail:
5053 if (new_stream)
5054 dc_stream_release(new_stream);
5055 return ret;
5056 }
5057
dm_update_planes_state(struct dc * dc,struct drm_atomic_state * state,bool enable,bool * lock_and_validation_needed)5058 static int dm_update_planes_state(struct dc *dc,
5059 struct drm_atomic_state *state,
5060 bool enable,
5061 bool *lock_and_validation_needed)
5062 {
5063 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
5064 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5065 struct drm_plane *plane;
5066 struct drm_plane_state *old_plane_state, *new_plane_state;
5067 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
5068 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
5069 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
5070 int i ;
5071 /* TODO return page_flip_needed() function */
5072 bool pflip_needed = !state->allow_modeset;
5073 int ret = 0;
5074
5075
5076 /* Add new planes, in reverse order as DC expectation */
5077 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
5078 new_plane_crtc = new_plane_state->crtc;
5079 old_plane_crtc = old_plane_state->crtc;
5080 dm_new_plane_state = to_dm_plane_state(new_plane_state);
5081 dm_old_plane_state = to_dm_plane_state(old_plane_state);
5082
5083 /*TODO Implement atomic check for cursor plane */
5084 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5085 continue;
5086
5087 /* Remove any changed/removed planes */
5088 if (!enable) {
5089 if (pflip_needed &&
5090 plane->type != DRM_PLANE_TYPE_OVERLAY)
5091 continue;
5092
5093 if (!old_plane_crtc)
5094 continue;
5095
5096 old_crtc_state = drm_atomic_get_old_crtc_state(
5097 state, old_plane_crtc);
5098 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5099
5100 if (!dm_old_crtc_state->stream)
5101 continue;
5102
5103 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
5104 plane->base.id, old_plane_crtc->base.id);
5105
5106 if (!dc_remove_plane_from_context(
5107 dc,
5108 dm_old_crtc_state->stream,
5109 dm_old_plane_state->dc_state,
5110 dm_state->context)) {
5111
5112 ret = EINVAL;
5113 return ret;
5114 }
5115
5116
5117 dc_plane_state_release(dm_old_plane_state->dc_state);
5118 dm_new_plane_state->dc_state = NULL;
5119
5120 *lock_and_validation_needed = true;
5121
5122 } else { /* Add new planes */
5123 struct dc_plane_state *dc_new_plane_state;
5124
5125 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
5126 continue;
5127
5128 if (!new_plane_crtc)
5129 continue;
5130
5131 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
5132 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5133
5134 if (!dm_new_crtc_state->stream)
5135 continue;
5136
5137 if (pflip_needed &&
5138 plane->type != DRM_PLANE_TYPE_OVERLAY)
5139 continue;
5140
5141 WARN_ON(dm_new_plane_state->dc_state);
5142
5143 dc_new_plane_state = dc_create_plane_state(dc);
5144 if (!dc_new_plane_state)
5145 return -ENOMEM;
5146
5147 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
5148 plane->base.id, new_plane_crtc->base.id);
5149
5150 ret = fill_plane_attributes(
5151 new_plane_crtc->dev->dev_private,
5152 dc_new_plane_state,
5153 new_plane_state,
5154 new_crtc_state);
5155 if (ret) {
5156 dc_plane_state_release(dc_new_plane_state);
5157 return ret;
5158 }
5159
5160 /*
5161 * Any atomic check errors that occur after this will
5162 * not need a release. The plane state will be attached
5163 * to the stream, and therefore part of the atomic
5164 * state. It'll be released when the atomic state is
5165 * cleaned.
5166 */
5167 if (!dc_add_plane_to_context(
5168 dc,
5169 dm_new_crtc_state->stream,
5170 dc_new_plane_state,
5171 dm_state->context)) {
5172
5173 dc_plane_state_release(dc_new_plane_state);
5174 return -EINVAL;
5175 }
5176
5177 dm_new_plane_state->dc_state = dc_new_plane_state;
5178
5179 /* Tell DC to do a full surface update every time there
5180 * is a plane change. Inefficient, but works for now.
5181 */
5182 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
5183
5184 *lock_and_validation_needed = true;
5185 }
5186 }
5187
5188
5189 return ret;
5190 }
5191
amdgpu_dm_atomic_check(struct drm_device * dev,struct drm_atomic_state * state)5192 static int amdgpu_dm_atomic_check(struct drm_device *dev,
5193 struct drm_atomic_state *state)
5194 {
5195 struct amdgpu_device *adev = dev->dev_private;
5196 struct dc *dc = adev->dm.dc;
5197 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
5198 struct drm_connector *connector;
5199 struct drm_connector_state *old_con_state, *new_con_state;
5200 struct drm_crtc *crtc;
5201 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5202 int ret, i;
5203
5204 /*
5205 * This bool will be set for true for any modeset/reset
5206 * or plane update which implies non fast surface update.
5207 */
5208 bool lock_and_validation_needed = false;
5209
5210 ret = drm_atomic_helper_check_modeset(dev, state);
5211 if (ret)
5212 goto fail;
5213
5214 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5215 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5216 !new_crtc_state->color_mgmt_changed)
5217 continue;
5218
5219 if (!new_crtc_state->enable)
5220 continue;
5221
5222 ret = drm_atomic_add_affected_connectors(state, crtc);
5223 if (ret)
5224 return ret;
5225
5226 ret = drm_atomic_add_affected_planes(state, crtc);
5227 if (ret)
5228 goto fail;
5229 }
5230
5231 dm_state->context = dc_create_state();
5232 ASSERT(dm_state->context);
5233 dc_resource_state_copy_construct_current(dc, dm_state->context);
5234
5235 /* Remove exiting planes if they are modified */
5236 ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
5237 if (ret) {
5238 goto fail;
5239 }
5240
5241 /* Disable all crtcs which require disable */
5242 ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
5243 if (ret) {
5244 goto fail;
5245 }
5246
5247 /* Enable all crtcs which require enable */
5248 ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
5249 if (ret) {
5250 goto fail;
5251 }
5252
5253 /* Add new/modified planes */
5254 ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
5255 if (ret) {
5256 goto fail;
5257 }
5258
5259 /* Run this here since we want to validate the streams we created */
5260 ret = drm_atomic_helper_check_planes(dev, state);
5261 if (ret)
5262 goto fail;
5263
5264 /* Check scaling and underscan changes*/
5265 /*TODO Removed scaling changes validation due to inability to commit
5266 * new stream into context w\o causing full reset. Need to
5267 * decide how to handle.
5268 */
5269 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5270 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5271 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5272 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5273
5274 /* Skip any modesets/resets */
5275 if (!acrtc || drm_atomic_crtc_needs_modeset(
5276 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
5277 continue;
5278
5279 /* Skip any thing not scale or underscan changes */
5280 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
5281 continue;
5282
5283 lock_and_validation_needed = true;
5284 }
5285
5286 /*
5287 * For full updates case when
5288 * removing/adding/updating streams on once CRTC while flipping
5289 * on another CRTC,
5290 * acquiring global lock will guarantee that any such full
5291 * update commit
5292 * will wait for completion of any outstanding flip using DRMs
5293 * synchronization events.
5294 */
5295
5296 if (lock_and_validation_needed) {
5297
5298 ret = do_aquire_global_lock(dev, state);
5299 if (ret)
5300 goto fail;
5301
5302 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
5303 ret = -EINVAL;
5304 goto fail;
5305 }
5306 }
5307
5308 /* Must be success */
5309 WARN_ON(ret);
5310 return ret;
5311
5312 fail:
5313 if (ret == -EDEADLK)
5314 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
5315 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
5316 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
5317 else
5318 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
5319
5320 return ret;
5321 }
5322
is_dp_capable_without_timing_msa(struct dc * dc,struct amdgpu_dm_connector * amdgpu_dm_connector)5323 static bool is_dp_capable_without_timing_msa(struct dc *dc,
5324 struct amdgpu_dm_connector *amdgpu_dm_connector)
5325 {
5326 uint8_t dpcd_data;
5327 bool capable = false;
5328
5329 if (amdgpu_dm_connector->dc_link &&
5330 dm_helpers_dp_read_dpcd(
5331 NULL,
5332 amdgpu_dm_connector->dc_link,
5333 DP_DOWN_STREAM_PORT_COUNT,
5334 &dpcd_data,
5335 sizeof(dpcd_data))) {
5336 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
5337 }
5338
5339 return capable;
5340 }
amdgpu_dm_add_sink_to_freesync_module(struct drm_connector * connector,struct edid * edid)5341 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
5342 struct edid *edid)
5343 {
5344 int i;
5345 bool edid_check_required;
5346 struct detailed_timing *timing;
5347 struct detailed_non_pixel *data;
5348 struct detailed_data_monitor_range *range;
5349 struct amdgpu_dm_connector *amdgpu_dm_connector =
5350 to_amdgpu_dm_connector(connector);
5351 struct dm_connector_state *dm_con_state;
5352
5353 struct drm_device *dev = connector->dev;
5354 struct amdgpu_device *adev = dev->dev_private;
5355
5356 if (!connector->state) {
5357 DRM_ERROR("%s - Connector has no state", __func__);
5358 return;
5359 }
5360
5361 dm_con_state = to_dm_connector_state(connector->state);
5362
5363 edid_check_required = false;
5364 if (!amdgpu_dm_connector->dc_sink) {
5365 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
5366 return;
5367 }
5368 if (!adev->dm.freesync_module)
5369 return;
5370 /*
5371 * if edid non zero restrict freesync only for dp and edp
5372 */
5373 if (edid) {
5374 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
5375 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
5376 edid_check_required = is_dp_capable_without_timing_msa(
5377 adev->dm.dc,
5378 amdgpu_dm_connector);
5379 }
5380 }
5381 dm_con_state->freesync_capable = false;
5382 if (edid_check_required == true && (edid->version > 1 ||
5383 (edid->version == 1 && edid->revision > 1))) {
5384 for (i = 0; i < 4; i++) {
5385
5386 timing = &edid->detailed_timings[i];
5387 data = &timing->data.other_data;
5388 range = &data->data.range;
5389 /*
5390 * Check if monitor has continuous frequency mode
5391 */
5392 if (data->type != EDID_DETAIL_MONITOR_RANGE)
5393 continue;
5394 /*
5395 * Check for flag range limits only. If flag == 1 then
5396 * no additional timing information provided.
5397 * Default GTF, GTF Secondary curve and CVT are not
5398 * supported
5399 */
5400 if (range->flags != 1)
5401 continue;
5402
5403 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
5404 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
5405 amdgpu_dm_connector->pixel_clock_mhz =
5406 range->pixel_clock_mhz * 10;
5407 break;
5408 }
5409
5410 if (amdgpu_dm_connector->max_vfreq -
5411 amdgpu_dm_connector->min_vfreq > 10) {
5412 amdgpu_dm_connector->caps.supported = true;
5413 amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
5414 amdgpu_dm_connector->min_vfreq * 1000000;
5415 amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
5416 amdgpu_dm_connector->max_vfreq * 1000000;
5417 dm_con_state->freesync_capable = true;
5418 }
5419 }
5420
5421 /*
5422 * TODO figure out how to notify user-mode or DRM of freesync caps
5423 * once we figure out how to deal with freesync in an upstreamable
5424 * fashion
5425 */
5426
5427 }
5428
amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector * connector)5429 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
5430 {
5431 /*
5432 * TODO fill in once we figure out how to deal with freesync in
5433 * an upstreamable fashion
5434 */
5435 }
5436