/* $NetBSD: amdgpu_dm_mst_types.c,v 1.2 2021/12/18 23:45:00 riastradh Exp $ */

/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dm_mst_types.c,v 1.2 2021/12/18 23:45:00 riastradh Exp $");

#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif


#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

/* #define TRACE_DPCD */

#ifdef TRACE_DPCD
#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)

static inline char *side_band_msg_type_to_str(uint32_t address)
{
	static char str[10] = {0};

	if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
		strcpy(str, "DOWN_REQ");
	else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
		strcpy(str, "UP_REP");
	else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
		strcpy(str, "DOWN_REP");
	else
		strcpy(str, "UP_REQ");

	return str;
}

static void log_dpcd(uint8_t type,
		     uint32_t address,
		     uint8_t *data,
		     uint32_t size,
		     bool res)
{
	DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
			(type == DP_AUX_NATIVE_READ) ||
			(type == DP_AUX_I2C_READ) ?
					"Read" : "Write",
			address,
			SIDE_BAND_MSG(address) ?
					side_band_msg_type_to_str(address) : "Nop",
			res ? "OK" : "Fail");

	if (res) {
		print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
	}
}
#endif

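/*
 * Relay DRM AUX messages to DC's raw AUX path.  A native AUX
 * transaction carries at most 16 data bytes, hence the -E2BIG check;
 * the request bits select i2c-over-aux vs. native and read vs. write.
 * DC's channel operation result is mapped onto errno values for DRM.
 */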
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_channel_operation_result operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	if (payload.write)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_CHANNEL_OPERATION_SUCCEEDED:
			break;
		case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
		case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
			result = -EIO;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
		case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
	struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;

	kfree(amdgpu_dm_connector->edid);
	amdgpu_dm_connector->edid = NULL;

	drm_encoder_cleanup(&amdgpu_encoder->base);
	kfree(amdgpu_encoder);
	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
	kfree(amdgpu_dm_connector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
	amdgpu_dm_connector->debugfs_dpcd_address = 0;
	amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif

	return drm_dp_mst_connector_late_register(connector, port);
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
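/*
 * Probe DSC decoder capabilities behind an MST port: locate the AUX
 * channel of the DSC-capable device for this port, read the 16-byte
 * capability block starting at DP_DSC_SUPPORT, and have DC parse it
 * into the sink's dsc_dec_caps.  Any failure means no DSC here.
 */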
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->sink_dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

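/*
 * Connector .get_modes: pull the EDID across the MST sideband (cached
 * on the connector), create a DC remote sink for it if needed, refresh
 * FreeSync and DSC capabilities, and report the EDID modes to DRM.
 */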
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;
		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);
			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->sink_dsc_caps,
				       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_connector_state *connector_state)
{
	return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}

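/*
 * Atomic check for MST connectors: if this connector is being detached
 * from its CRTC, or the CRTC is disabled as part of a modeset, release
 * the VCPI time slots held by the port so the bandwidth returns to the
 * topology's pool.
 */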
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static struct amdgpu_encoder *
dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_encoder *encoder;

	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return NULL;

	encoder = &amdgpu_encoder->base;
	encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	drm_encoder_init(
		dev,
		&amdgpu_encoder->base,
		&amdgpu_dm_encoder_funcs,
		DRM_MODE_ENCODER_DPMST,
		NULL);

	drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);

	return amdgpu_encoder;
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
	drm_connector_attach_encoder(&aconnector->base,
				     &aconnector->mst_encoder->base);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
		 aconnector, connector->base.id, aconnector->mst_port);

	drm_dp_mst_get_port_malloc(port);

	DRM_DEBUG_KMS(":%d\n", connector->base.id);

	return connector;
}

static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_connector *connector)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
		 aconnector, connector->base.id, aconnector->mst_port);

	if (aconnector->dc_sink) {
		amdgpu_dm_update_freesync_caps(connector, NULL);
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	drm_connector_unregister(connector);
	if (adev->mode_info.rfbdev)
		drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
	drm_connector_put(connector);
}

static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->mode_info.rfbdev)
		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
	else
		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");

	drm_connector_register(connector);
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
	.destroy_connector = dm_dp_destroy_mst_connector,
	.register_connector = dm_dp_mst_register_connector
};

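/*
 * Hook up DP AUX and CEC for a root DP connector and, for everything
 * but eDP, initialize the MST topology manager.  The 16 and 4
 * arguments bound the DPCD transaction size in bytes and the number of
 * payload streams the manager will track.
 */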
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector)
{
	aconnector->dm_dp_aux.aux.name = "dmdc";
	aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		dm->adev->ddev,
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		aconnector->connector_id);
}

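/*
 * PBN available per MTP time slot on this link.  Link bandwidth in
 * kbps / (8 * 1000) is MBps, and one PBN unit is 54/64 MBps spread
 * over the 64 slots, so MBps / 54 is the per-slot PBN figure.
 */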
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
};

struct dsc_mst_fairness_vars {
	int pbn;
	bool dsc_enabled;
	int bpp_x16;
};

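/*
 * Peak PBN needed for a stream rate in kbps: apply the 1.006 margin
 * (the same 0.6% allowance drm_dp_calc_pbn_mode() uses) and convert
 * with pbn = peak_kbps * 64 / (54 * 8 * 1000), rounded up.
 */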
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

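/*
 * Apply the final fairness result: streams left with dsc_enabled get a
 * freshly computed DSC config pinned to the chosen bits_per_pixel
 * (in 1/16-bpp units); all others have DSC flagged off.
 */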
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;
			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}

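/*
 * Inverse of kbps_to_peak_pbn(): map a PBN allocation back to kbps
 * (994/1000 undoes the 1.006 margin) and ask DC which DSC
 * bits-per-pixel setting fits within that bandwidth.
 */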
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->sink_dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}

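/*
 * Greedy fairness pass: repeatedly take the DSC stream with the least
 * remaining headroom toward its uncompressed rate and offer it an even
 * share of the link's free time slots (63 usable; slot 0 carries the
 * MTP header).  Every bump is validated with drm_dp_mst_atomic_check()
 * and rolled back if the topology no longer fits.
 */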
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}

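/*
 * Second pass: a stream already at its maximum DSC bpp may as well try
 * running uncompressed.  Attempt that in order of largest bandwidth
 * gain, keeping the change only when the MST atomic check still
 * passes; otherwise restore the max-kbps DSC allocation.
 */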
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  0) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}

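/*
 * Per-link DSC strategy, tried in order: (1) every stream
 * uncompressed; (2) if that overflows the link, maximum compression on
 * every DSC-capable stream; (3) from there, grow bpp fairly and then
 * try switching individual streams back to uncompressed.
 */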
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp,
				dsc_policy.max_target_bpp,
				&stream->sink->sink_dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}
	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  0) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state)) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  0) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}

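/*
 * Entry point from the DM atomic check: run the fairness computation
 * once per MST link (marking every stream sharing that link as done),
 * then attach DSC resources to the streams that ended up with
 * timing.flags.DSC set.
 */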
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
	}

	return true;
}

#endif
