/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"

#include <linux/backlight.h>
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>
#include <linux/iopoll.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include <nvif/push507c.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/event.h>
#include <nvif/if0012.h>
#include <nvif/if0014.h>
#include <nvif/timer.h>

#include <nvhw/class/cl507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
#include <nvhw/class/cl887d.h>
#include <nvhw/class/cl907d.h>
#include <nvhw/class/cl917d.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"
#include "nv50_display.h"

/******************************************************************************
 * EVO channel
 *****************************************************************************/

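/* Instantiate a display channel object.  The caller passes a list of
 * candidate classes ordered from most to least preferred; the first one
 * also advertised by the display object is constructed, and the
 * channel's user area is mapped for CPU access.
 */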
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_ctor(disp, "kmsChan", 0,
						       oclass[0], data, size,
						       &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}

static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_dtor(&chan->user);
}

/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
	nvif_object_dtor(&dmac->vram);
	nvif_object_dtor(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	nvif_mem_dtor(&dmac->_push.mem);
}

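/* Submit buffered commands to the hardware by advancing PUT to the
 * current CPU write position.  For VRAM-backed push buffers, writes are
 * first flushed all the way to VRAM (via the 0x070000 flush sequence
 * below) so the display engine never fetches stale data.
 */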
static void
nv50_dmac_kick(struct nvif_push *push)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);

	dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
	if (dmac->put != dmac->cur) {
		/* Push buffer fetches are not coherent with BAR1, so we need
		 * to ensure writes have been flushed right through to VRAM
		 * before writing PUT.
		 */
		if (dmac->push->mem.type & NVIF_MEM_VRAM) {
			struct nvif_device *device = dmac->base.device;
			nvif_wr32(&device->object, 0x070000, 0x00000001);
			nvif_msec(device, 2000,
				if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
					break;
			);
		}

		NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
		dmac->put = dmac->cur;
	}

	push->bgn = push->cur;
}

static int
nv50_dmac_free(struct nv50_dmac *dmac)
{
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get > dmac->cur) /* NVIDIA stays 5 away from GET, do the same. */
		return get - dmac->cur - 5;
	return dmac->max - dmac->cur;
}

static int
nv50_dmac_wind(struct nv50_dmac *dmac)
{
	/* Wait for GET to depart from the beginning of the push buffer to
	 * prevent writing PUT == GET, which would be ignored by HW.
	 */
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get == 0) {
		/* Corner-case, HW idle, but non-committed work pending. */
		if (dmac->put == 0)
			nv50_dmac_kick(dmac->push);

		if (nvif_msec(dmac->base.device, 2000,
			if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
				break;
		) < 0)
			return -ETIMEDOUT;
	}

	PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
	dmac->cur = 0;
	return 0;
}

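/* Reserve space for 'size' dwords in the push buffer.  If the request
 * would run past the end of the buffer, wrap back to the start via
 * nv50_dmac_wind(), then wait for the hardware to consume enough of
 * the buffer to satisfy the request.
 */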
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
	int free;

	if (WARN_ON(size > dmac->max))
		return -EINVAL;

	dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
	if (dmac->cur + size >= dmac->max) {
		int ret = nv50_dmac_wind(dmac);
		if (ret)
			return ret;

		push->cur = dmac->_push.mem.object.map.ptr;
		push->cur = push->cur + dmac->cur;
		nv50_dmac_kick(push);
	}

	if (nvif_msec(dmac->base.device, 2000,
		if ((free = nv50_dmac_free(dmac)) >= size)
			break;
	) < 0) {
		WARN_ON(1);
		return -ETIMEDOUT;
	}

	push->bgn = dmac->_push.mem.object.map.ptr;
	push->bgn = push->bgn + dmac->cur;
	push->cur = push->bgn;
	push->end = push->cur + free;
	return 0;
}

MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);

int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nvif_disp_chan_v0 *args = data;
	u8 type = NVIF_MEM_COHERENT;
	int ret;

	mutex_init(&dmac->lock);

	/* Pascal added support for 47-bit physical addresses, but some
	 * parts of EVO still only accept 40-bit PAs.
	 *
	 * To avoid issues on systems with large amounts of RAM, and on
	 * systems where an IOMMU maps pages at a high address, we need
	 * to allocate push buffers in VRAM instead.
	 *
	 * This appears to match NVIDIA's behaviour on Pascal.
	 */
	if ((nv50_dmac_vram_pushbuf > 0) ||
	    (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
		type |= NVIF_MEM_VRAM;

	ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
				&dmac->_push.mem);
	if (ret)
		return ret;

	dmac->ptr = dmac->_push.mem.object.map.ptr;
	dmac->_push.wait = nv50_dmac_wait;
	dmac->_push.kick = nv50_dmac_kick;
	dmac->push = &dmac->_push;
	dmac->push->bgn = dmac->_push.mem.object.map.ptr;
	dmac->push->cur = dmac->push->bgn;
	dmac->push->end = dmac->push->bgn;
	dmac->max = 0x1000/4 - 1;

	/* EVO channels are affected by a HW bug where the last 12 DWORDs
	 * of the push buffer can't be used safely.
	 */
	if (disp->oclass < GV100_DISP)
		dmac->max -= 12;

	args->pushbuf = nvif_handle(&dmac->_push.mem.object);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	if (ret)
		return ret;

	if (syncbuf < 0)
		return 0;

	ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}

/******************************************************************************
 * Output path helpers
 *****************************************************************************/
static void
nv50_outp_dump_caps(struct nouveau_drm *drm,
		    struct nouveau_encoder *outp)
{
	NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
		 outp->base.base.name, outp->caps.dp_interlace);
}

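/* Validate the requested mode against the connector's native mode and
 * decide whether the native timings (and, for LVDS/eDP, the fullscreen
 * scaler) should be used instead.  Any change is copied into
 * adjusted_mode so the rest of the atomic check sees the final timings.
 */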
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
			    struct drm_crtc_state *crtc_state,
			    struct drm_connector_state *conn_state,
			    struct drm_display_mode *native_mode)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);

	NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
	asyc->scaler.full = false;
	if (!native_mode)
		return 0;

	if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_LVDS:
		case DRM_MODE_CONNECTOR_eDP:
			/* Don't force the scaler for EDID modes with the same
			 * size as the native one (e.g. a different refresh
			 * rate).
			 */
			if (mode->hdisplay == native_mode->hdisplay &&
			    mode->vdisplay == native_mode->vdisplay &&
			    mode->type & DRM_MODE_TYPE_DRIVER)
				break;
			mode = native_mode;
			asyc->scaler.full = true;
			break;
		default:
			break;
		}
	} else {
		mode = native_mode;
	}

	if (!drm_mode_equal(adjusted_mode, mode)) {
		drm_mode_copy(adjusted_mode, mode);
		crtc_state->mode_changed = true;
	}

	return 0;
}

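/* DP only: make sure the chosen bpc actually fits within the bandwidth
 * of the link (link_nr * link_bw), stepping the per-component depth
 * down from at most 10bpc to a floor of 6bpc.
 */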
static void
nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
{
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	unsigned int max_rate, mode_rate;

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_DP:
		max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;

		/* we don't support more than 10 anyway */
		asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);

		/* reduce the bpc until it works out */
		while (asyh->or.bpc > 6) {
			mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
			if (mode_rate <= max_rate)
				break;

			asyh->or.bpc -= 2;
		}
		break;
	default:
		break;
	}
}

static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  nv_connector->native_mode);
	if (ret)
		return ret;

	if (crtc_state->mode_changed || crtc_state->connectors_changed)
		asyh->or.bpc = connector->display_info.bpc;

	/* We might have to reduce the bpc */
	nv50_outp_atomic_fix_depth(encoder, crtc_state);

	return 0;
}

struct nouveau_connector *
nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

struct nouveau_connector *
nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_old_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

static struct nouveau_crtc *
nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	const u32 mask = drm_encoder_mask(&outp->base.base);
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->encoder_mask & mask)
			return nouveau_crtc(crtc);
	}

	return NULL;
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);

	core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
	nv_encoder->crtc = NULL;
}

static void
nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u32 ctrl = 0;

	switch (nv_crtc->index) {
	case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
	case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
	case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
	case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
	default:
		WARN_ON(1);
		break;
	}

	ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);

	if (!nvif_outp_acquired(&nv_encoder->outp))
		nvif_outp_acquire_dac(&nv_encoder->outp);

	core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
	asyh->or.depth = 0;

	nv_encoder->crtc = &nv_crtc->base;
}

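/* Analog load detection.  Uses the load value provided by the VBIOS if
 * there is one, falling back to a default of 340 otherwise.
 */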
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	u32 loadval;
	int ret;

	loadval = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (loadval == 0)
		loadval = 340;

	ret = nvif_outp_load_detect(&nv_encoder->outp, loadval);
	if (ret <= 0)
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.atomic_enable = nv50_dac_atomic_enable,
	.atomic_disable = nv50_dac_atomic_disable,
	.detect = nv50_dac_detect
};

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct nouveau_encoder *nv_encoder)
{
	struct drm_connector *connector = &nv_encoder->conn->base;
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct drm_encoder *encoder;
	struct dcb_output *dcbe = nv_encoder->dcb;
	int type = DRM_MODE_ENCODER_DAC;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_connector_attach_encoder(connector, encoder);
	return 0;
}

/*
 * audio component binding for ELD notification
 */
static void
nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
				int dev_id)
{
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 port, dev_id);
}

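/* Callback used by the HDA driver, via the audio component framework,
 * to fetch the cached ELD for the encoder currently driving
 * (port, dev_id).  Returns the ELD size in bytes, or 0 if no matching
 * active encoder is found.
 */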
static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
			     bool *enabled, unsigned char *buf, int max_bytes)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_encoder *encoder;
	struct nouveau_encoder *nv_encoder;
	struct nouveau_crtc *nv_crtc;
	int ret = 0;

	*enabled = false;

	mutex_lock(&drm->audio.lock);

	drm_for_each_encoder(encoder, drm->dev) {
		struct nouveau_connector *nv_connector = NULL;

		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
			continue; /* TODO */

		nv_encoder = nouveau_encoder(encoder);
		nv_connector = nv_encoder->conn;
		nv_crtc = nouveau_crtc(nv_encoder->crtc);

		if (!nv_crtc || nv_encoder->outp.or.id != port || nv_crtc->index != dev_id)
			continue;

		*enabled = nv_encoder->audio.enabled;
		if (*enabled) {
			ret = drm_eld_size(nv_connector->base.eld);
			memcpy(buf, nv_connector->base.eld,
			       min(max_bytes, ret));
		}
		break;
	}

	mutex_unlock(&drm->audio.lock);

	return ret;
}

static const struct drm_audio_component_ops nv50_audio_component_ops = {
	.get_eld = nv50_audio_component_get_eld,
};

static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
			  void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
		return -ENOMEM;

	drm_modeset_lock_all(drm_dev);
	acomp->ops = &nv50_audio_component_ops;
	acomp->dev = kdev;
	drm->audio.component = acomp;
	drm_modeset_unlock_all(drm_dev);
	return 0;
}

static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
			    void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	drm_modeset_lock_all(drm_dev);
	drm->audio.component = NULL;
	acomp->ops = NULL;
	acomp->dev = NULL;
	drm_modeset_unlock_all(drm_dev);
}

static const struct component_ops nv50_audio_component_bind_ops = {
	.bind   = nv50_audio_component_bind,
	.unbind = nv50_audio_component_unbind,
};

static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
	if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
		return;

	drm->audio.component_registered = true;
	mutex_init(&drm->audio.lock);
}

static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
	if (!drm->audio.component_registered)
		return;

	component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
	drm->audio.component_registered = false;
	mutex_destroy(&drm->audio.lock);
}

/******************************************************************************
 * Audio
 *****************************************************************************/
static bool
nv50_audio_supported(struct drm_encoder *encoder)
{
	struct nv50_disp *disp = nv50_disp(encoder->dev);

	if (disp->disp->object.oclass <= GT200_DISP ||
	    disp->disp->object.oclass == GT206_DISP)
		return false;

	if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		switch (nv_encoder->dcb->type) {
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_DP:
			break;
		default:
			return false;
		}
	}

	return true;
}

static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nvif_outp *outp = &nv_encoder->outp;

	if (!nv50_audio_supported(encoder))
		return;

	mutex_lock(&drm->audio.lock);
	if (nv_encoder->audio.enabled) {
		nv_encoder->audio.enabled = false;
		nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, NULL, 0);
	}
	mutex_unlock(&drm->audio.lock);

	nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
}

static void
nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
		  struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
		  struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nvif_outp *outp = &nv_encoder->outp;

	if (!nv50_audio_supported(encoder) || !drm_detect_monitor_audio(nv_connector->edid))
		return;

	mutex_lock(&drm->audio.lock);

	nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, nv_connector->base.eld,
			  drm_eld_size(nv_connector->base.eld));
	nv_encoder->audio.enabled = true;

	mutex_unlock(&drm->audio.lock);

	nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
}

/******************************************************************************
 * HDMI
 *****************************************************************************/
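/* Light up HDMI on a TMDS OR: program SCDC scrambling for HF displays,
 * enable the HDMI core with an audio packet budget derived from the
 * horizontal blanking period, then send the AVI and vendor infoframes.
 */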
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
		 struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
		 struct drm_display_mode *mode, bool hda)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi;
	union hdmi_infoframe infoframe = { 0 };
	const u8 rekey = 56; /* constant used by the binary driver and tegra */
	u32 max_ac_packet;
	struct {
		struct nvif_outp_infoframe_v0 infoframe;
		u8 data[17];
	} args = { 0 };
	int ret, size;

	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= rekey;
	max_ac_packet -= 18; /* constant from tegra */
	max_ac_packet /= 32;

	if (nv_encoder->i2c && hdmi->scdc.scrambling.supported) {
		const bool high_tmds_clock_ratio = mode->clock > 340000;
		u8 scdc;

		ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc);
		if (ret < 0) {
			NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
			return;
		}

		scdc &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
		if (high_tmds_clock_ratio || hdmi->scdc.scrambling.low_rates)
			scdc |= SCDC_SCRAMBLING_ENABLE;
		if (high_tmds_clock_ratio)
			scdc |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;

		ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, scdc);
		if (ret < 0)
			NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
				 scdc, ret);
	}

	ret = nvif_outp_hdmi(&nv_encoder->outp, nv_crtc->index, true, max_ac_packet, rekey,
			     mode->clock, hdmi->scdc.supported, hdmi->scdc.scrambling.supported,
			     hdmi->scdc.scrambling.low_rates);
	if (ret)
		return;

	/* AVI InfoFrame. */
	args.infoframe.version = 0;
	args.infoframe.head = nv_crtc->index;

	if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
		drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
						   HDMI_QUANTIZATION_RANGE_FULL);

		size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
	} else {
		size = 0;
	}

	nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);

	/* Vendor InfoFrame. */
	memset(&args.data, 0, sizeof(args.data));
	if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
							 &nv_connector->base, mode))
		size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
	else
		size = 0;

	nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);

	nv_encoder->hdmi.enabled = true;
}

/******************************************************************************
 * MST
 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;
};

struct nv50_msto {
	struct drm_encoder encoder;

	/* head is statically assigned on msto creation */
	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;
	bool enabled;

	u32 display_id;
};

struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
{
	struct nv50_msto *msto;

	if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
		return nouveau_encoder(encoder);

	msto = nv50_msto(encoder);
	if (!msto->mstc)
		return NULL;
	return msto->mstc->mstm->outp;
}

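/* MST payload updates happen in two steps: nv50_msto_prepare() runs
 * before the new state is committed to hardware and performs part 1 of
 * the payload table update (plus VCPI programming), while
 * nv50_msto_cleanup() runs afterwards and completes part 2.
 */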
static void
nv50_msto_cleanup(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *new_mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, msto->mstc->port);
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(state, mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, msto->mstc->port);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);

	if (msto->disabled) {
		if (msto->head->func->display_id) {
			nvif_outp_dp_mst_id_put(&mstm->outp->outp, msto->display_id);
			msto->display_id = 0;
		}

		msto->mstc = NULL;
		msto->disabled = false;
		drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
	} else if (msto->enabled) {
		drm_dp_add_payload_part2(mgr, state, new_payload);
		msto->enabled = false;
	}
}

static void
nv50_msto_prepare(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct drm_dp_mst_atomic_payload *payload;
	int ret = 0;

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);

	payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);

	if (msto->disabled) {
		drm_dp_remove_payload_part1(mgr, mst_state, payload);
		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
		ret = 1;
	} else {
		if (msto->enabled)
			ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
	}

	if (ret == 0) {
		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
				      payload->vc_start_slot, payload->time_slots,
				      payload->pbn,
				      payload->time_slots * dfixed_trunc(mst_state->pbn_div));
	} else {
		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
	}
}

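/* Note: the PBN is derived from the adjusted mode's clock and the
 * connector's bpc, with the bpp argument to drm_dp_calc_pbn_mode()
 * expressed in 1/16th-of-a-bit units (hence the << 4).
 */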
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct drm_dp_mst_topology_state *mst_state;
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nv50_mstm *mstm = mstc->mstm;
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int slots;
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  mstc->native);
	if (ret)
		return ret;

	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	/*
	 * When restoring duplicated states, we need to make sure that the bw
	 * remains the same and avoid recalculating it, as the connector's bpc
	 * may have changed after the state was duplicated
	 */
	if (!state->duplicated) {
		const int clock = crtc_state->adjusted_mode.clock;

		asyh->or.bpc = connector->display_info.bpc;
		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3 << 4);
	}

	mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	if (!mst_state->pbn_div.full) {
		struct nouveau_encoder *outp = mstc->mstm->outp;

		mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
							      outp->dp.link_bw, outp->dp.link_nr);
	}

	slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
	if (slots < 0)
		return slots;

	asyh->dp.tu = slots;

	return 0;
}

static u8
nv50_dp_bpc_to_depth(unsigned int bpc)
{
	switch (bpc) {
	case  6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
	case  8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
	case 10:
	default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
	}
}

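/* The SOR is shared by every stream on the topology, so it's acquired
 * and link-trained when the first msto is enabled, and only released
 * again (from nv50_mstm_cleanup()) once the last one is disabled.
 */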
static void
nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_head *head = msto->head;
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto;

	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	if (!mstm->links++) {
		nvif_outp_acquire_sor(&mstm->outp->outp, false /*TODO: MST audio... */);
		nouveau_dp_train(mstm->outp, true, 0, 0);
	}

	if (head->func->display_id) {
		if (!WARN_ON(nvif_outp_dp_mst_id_get(&mstm->outp->outp, &msto->display_id)))
			head->func->display_id(head, msto->display_id);
	}

	if (mstm->outp->outp.or.link & 1)
		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
	else
		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;

	mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
			   nv50_dp_bpc_to_depth(asyh->or.bpc));

	msto->mstc = mstc;
	msto->enabled = true;
	mstm->modified = true;
}

static void
nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	if (msto->head->func->display_id)
		msto->head->func->display_id(msto->head, 0);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}

static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.atomic_disable = nv50_msto_atomic_disable,
	.atomic_enable = nv50_msto_atomic_enable,
	.atomic_check = nv50_msto_atomic_check,
};

static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	drm_encoder_cleanup(&msto->encoder);
	kfree(msto);
}

static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};

static struct nv50_msto *
nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
	struct nv50_msto *msto;
	int ret;

	msto = kzalloc(sizeof(*msto), GFP_KERNEL);
	if (!msto)
		return ERR_PTR(-ENOMEM);

	ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
			       DRM_MODE_ENCODER_DPMST, "mst-%d", id);
	if (ret) {
		kfree(msto);
		return ERR_PTR(ret);
	}

	drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
	msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
	msto->head = head;
	return msto;
}

static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
			      struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct drm_crtc *crtc = connector_state->crtc;

	if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
		return NULL;

	return &nv50_head(crtc)->msto->encoder;
}

static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nouveau_encoder *outp = mstc->mstm->outp;

	/* TODO: calculate the PBN from the dotclock and validate against the
	 * MSTB's max possible PBN
	 */

	return nv50_dp_mode_valid(outp, mode, NULL);
}

static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret = 0;

	mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
	drm_connector_update_edid_property(&mstc->connector, mstc->edid);
	if (mstc->edid)
		ret = drm_add_edid_modes(&mstc->connector, mstc->edid);

	/*
	 * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
	 * to 8 to save bandwidth on the topology. In the future, we'll want
	 * to properly fix this by dynamically selecting the highest possible
	 * bpc that would fit in the topology
	 */
	if (connector->display_info.bpc)
		connector->display_info.bpc =
			clamp(connector->display_info.bpc, 6U, 8U);
	else
		connector->display_info.bpc = 8;

	if (mstc->native)
		drm_mode_destroy(mstc->connector.dev, mstc->native);
	mstc->native = nouveau_conn_native_mode(&mstc->connector);
	return ret;
}

static int
nv50_mstc_atomic_check(struct drm_connector *connector,
		       struct drm_atomic_state *state)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;

	return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
}

static int
nv50_mstc_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	ret = pm_runtime_get_sync(connector->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(connector->dev->dev);
		return connector_status_disconnected;
	}

	ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
				     mstc->port);

	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);
	return ret;
}

static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
	.atomic_check = nv50_mstc_atomic_check,
	.detect_ctx = nv50_mstc_detect,
};

static void
nv50_mstc_destroy(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_cleanup(&mstc->connector);
	drm_dp_mst_put_port_malloc(mstc->port);

	kfree(mstc);
}

static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};

static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct drm_crtc *crtc;
	struct nv50_mstc *mstc;
	int ret;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	drm_for_each_crtc(crtc, dev) {
		if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
			continue;

		drm_connector_attach_encoder(&mstc->connector,
					     &nv50_head(crtc)->msto->encoder);
	}

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_connector_set_path_property(&mstc->connector, path);
	drm_dp_mst_get_port_malloc(port);
	return 0;
}

static void
nv50_mstm_cleanup(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
	drm_dp_check_act_status(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
		}
	}

	if (mstm->disabled) {
		nouveau_dp_power_down(mstm->outp);
		nvif_outp_release(&mstm->outp->outp);
		mstm->disabled = false;
	}

	mstm->modified = false;
}

static void
nv50_mstm_prepare(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);

	/* Disable payloads first */
	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm && msto->disabled)
				nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
		}
	}

	/* Add payloads for new heads, while also updating the start slots of any unmodified (but
	 * active) heads that may have had their VC slots shifted left after the previous step
	 */
	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm && !msto->disabled)
				nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
		}
	}
}

static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	struct nv50_mstc *mstc;
	int ret;

	ret = nv50_mstc_new(mstm, port, path, &mstc);
	if (ret)
		return NULL;

	return &mstc->connector;
}

static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
};

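/* Service an MST sideband interrupt: keep reading the ESI registers
 * and feeding events to the topology manager until nothing is left to
 * handle, acknowledging each batch of events as it's processed.
 */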
bool
nv50_mstm_service(struct nouveau_drm *drm,
		  struct nouveau_connector *nv_connector,
		  struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = &nv_connector->aux;
	bool handled = true, ret = true;
	int rc;
	u8 esi[8] = {};

	while (handled) {
		u8 ack[8] = {};

		rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (rc != 8) {
			ret = false;
			break;
		}

		drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
		if (!handled)
			break;

		rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);

		if (rc != 1) {
			ret = false;
			break;
		}

		drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
	}

	if (!ret)
		NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
			 nv_connector->base.name, rc);

	return ret;
}

void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
	mstm->is_mst = false;
	drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}

int
nv50_mstm_detect(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	struct drm_dp_aux *aux;
	int ret;

	if (!mstm || !mstm->can_mst)
		return 0;

	aux = mstm->mgr.aux;

	/* Clear any leftover MST state we didn't set ourselves by first
	 * disabling MST if it was already enabled
	 */
	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
	if (ret < 0)
		return ret;

	/* And start enabling */
	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
	if (ret)
		return ret;

	mstm->is_mst = true;
	return 1;
}

static void
nv50_mstm_fini(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;

	if (!mstm)
		return;

	/* Don't change the MST state of this connector until we've finished
	 * resuming, since we can't safely grab hpd_irq_lock in our resume
	 * path to protect mstm->is_mst without potentially deadlocking
	 */
	mutex_lock(&outp->dp.hpd_irq_lock);
	mstm->suspended = true;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	if (mstm->is_mst)
		drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}

static void
nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	int ret = 0;

	if (!mstm)
		return;

	if (mstm->is_mst) {
		ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
		if (ret == -1)
			nv50_mstm_remove(mstm);
	}

	mutex_lock(&outp->dp.hpd_irq_lock);
	mstm->suspended = false;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	if (ret == -1)
		drm_kms_helper_hotplug_event(mstm->mgr.dev);
}

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;
	if (mstm) {
		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
		kfree(*pmstm);
		*pmstm = NULL;
	}
}

static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret;

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	return 0;
}

/******************************************************************************
 * SOR
 *****************************************************************************/
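/* Update SOR_SET_CONTROL for a head attach/detach.  nv_encoder->ctrl
 * accumulates the owner mask and protocol across heads; passing a NULL
 * asyh detaches the head, and the control word is zeroed once no owner
 * bits remain.
 */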
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct nv50_core *core = disp->core;

	if (!asyh) {
		nv_encoder->ctrl &= ~BIT(head);
		if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
		nv_encoder->ctrl |= BIT(head);
		asyh->or.depth = depth;
	}

	core->func->sor->ctrl(core, nv_encoder->outp.or.id, nv_encoder->ctrl, asyh);
}

/* TODO: Should we extend this to PWM-only backlights?
 * Also, should we add a DRM helper for waiting for the panel backlight to
 * acknowledge that it has been shut off? Intel doesn't seem to do this, and
 * uses a fixed time delay from the vbios…
 */
static void
nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_head *head = nv50_head(nv_encoder->crtc);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nouveau_backlight *backlight = nv_connector->backlight;
	struct drm_dp_aux *aux = &nv_connector->aux;
	int ret;

	if (backlight && backlight->uses_dpcd) {
		ret = drm_edp_backlight_disable(aux, &backlight->edp_info);
		if (ret < 0)
			NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
				 nv_connector->base.base.id, nv_connector->base.name, ret);
	}
#endif

	if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS && nv_encoder->hdmi.enabled) {
		nvif_outp_hdmi(&nv_encoder->outp, head->base.index,
			       false, 0, 0, 0, false, false, false);
		nv_encoder->hdmi.enabled = false;
	}

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
		nouveau_dp_power_down(nv_encoder);

	if (head->func->display_id)
		head->func->display_id(head, 0);

	nv_encoder->update(nv_encoder, head->base.index, NULL, 0, 0);
	nv50_audio_disable(encoder, &head->base);
	nv_encoder->crtc = NULL;
}

// common/inc/displayport/displayport.h
#define DP_CONFIG_WATERMARK_ADJUST                   2
#define DP_CONFIG_WATERMARK_LIMIT                   20
#define DP_CONFIG_INCREASED_WATERMARK_ADJUST         8
#define DP_CONFIG_INCREASED_WATERMARK_LIMIT         22

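/* SST watermark/blanking calculation, mirroring the formulas in
 * NVIDIA's display driver (note the header path referenced above): the
 * pixel-to-link bandwidth ratio is computed in fixed point using
 * PrecisionFactor, the resulting TU watermark is bounds-checked, and
 * the remaining hblank/vblank budgets are converted into link symbols.
 */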
static bool
nv50_sor_dp_watermark_sst(struct nouveau_encoder *outp,
			  struct nv50_head *head, struct nv50_head_atom *asyh)
{
	bool enhancedFraming = outp->dp.dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;
	u64 minRate = outp->dp.link_bw * 1000;
	unsigned tuSize = 64;
	unsigned waterMark;
	unsigned hBlankSym;
	unsigned vBlankSym;
	unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST;
	unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT;
	// depth is multiplied by 16 when DSC is enabled
	s32 hblank_symbols;
	// number of link clocks per line.
	int vblank_symbols = 0;
	bool bEnableDsc = false;
	unsigned surfaceWidth = asyh->mode.h.blanks - asyh->mode.h.blanke;
	unsigned rasterWidth = asyh->mode.h.active;
	unsigned depth = asyh->or.bpc * 3;
	unsigned DSC_FACTOR = bEnableDsc ? 16 : 1;
	u64 pixelClockHz = asyh->mode.clock * 1000;
	u64 PrecisionFactor = 100000, ratioF, watermarkF;
	u32 numLanesPerLink = outp->dp.link_nr;
	u32 numSymbolsPerLine;
	u32 BlankingBits;
	u32 surfaceWidthPerLink;
	u32 PixelSteeringBits;
	u64 NumBlankingLinkClocks;
	u32 MinHBlank;

	if (outp->outp.info.dp.increased_wm) {
		watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
		watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
	}

	if ((pixelClockHz * depth) >= (8 * minRate * outp->dp.link_nr * DSC_FACTOR))
	{
		return false;
	}

	//
	// For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with
	// 0 active symbols. This may cause a HW hang. Bug 200379426
	//
	if ((bEnableDsc) &&
	    ((pixelClockHz * depth) < div_u64(8 * minRate * outp->dp.link_nr * DSC_FACTOR, 64)))
	{
		return false;
	}

	//
	//  Perform the SST calculation.
	//	For auto mode the watermark calculation does not need to track accumulated error;
	//	the formulas for manual mode will not work.  So the calculation below was
	//	extracted from the DTB.
	//
1660 	ratioF = div_u64((u64)pixelClockHz * depth * PrecisionFactor, DSC_FACTOR);
1661 
1662 	ratioF = div_u64(ratioF, 8 * (u64) minRate * outp->dp.link_nr);
1663 
1664 	if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below
1665 		return false;
1666 
1667 	watermarkF = div_u64(ratioF * tuSize * (PrecisionFactor - ratioF), PrecisionFactor);
1668 	waterMark = (unsigned)(watermarkAdjust + (div_u64(2 * div_u64(depth * PrecisionFactor, 8 * numLanesPerLink * DSC_FACTOR) + watermarkF, PrecisionFactor)));
1669 
1670 	//
1671 	//  Bounds check the watermark
1672 	//
1673 	numSymbolsPerLine = div_u64(surfaceWidth * depth, 8 * outp->dp.link_nr * DSC_FACTOR);
1674 
1675 	if (WARN_ON(waterMark > 39 || waterMark > numSymbolsPerLine))
1676 		return false;
1677 
1678 	//
1679 	//  Clamp the low side
1680 	//
1681 	if (waterMark < watermarkMinimum)
1682 		waterMark = watermarkMinimum;
1683 
1684 	//Bits to send BS/BE/Extra symbols due to pixel padding
1685 	//Also accounts for enhanced framing.
1686 	BlankingBits = 3*8*numLanesPerLink + (enhancedFraming ? 3*8*numLanesPerLink : 0);
1687 
1688 	//VBID/MVID/MAUD sent 4 times all the time
1689 	BlankingBits += 3*8*4;
1690 
1691 	surfaceWidthPerLink = surfaceWidth;
1692 
1693 	//Extra bits sent due to pixel steering
1694 	u32 remain;
1695 	div_u64_rem(surfaceWidthPerLink, numLanesPerLink, &remain);
1696 	PixelSteeringBits = remain ? div_u64((numLanesPerLink - remain) * depth, DSC_FACTOR) : 0;
1697 
1698 	BlankingBits += PixelSteeringBits;
1699 	NumBlankingLinkClocks = div_u64((u64)BlankingBits * PrecisionFactor, (8 * numLanesPerLink));
1700 	MinHBlank = (u32)(div_u64(div_u64(NumBlankingLinkClocks * pixelClockHz, minRate), PrecisionFactor));
1701 	MinHBlank += 12;
1702 
1703 	if (WARN_ON(MinHBlank > rasterWidth - surfaceWidth))
1704 		return false;
1705 
1706 	// Bug 702290 - Active Width should be greater than 60
1707 	if (WARN_ON(surfaceWidth <= 60))
1708 		return false;
1709 
1710 
1711 	hblank_symbols = (s32)(div_u64((u64)(rasterWidth - surfaceWidth - MinHBlank) * minRate, pixelClockHz));
1712 
1713 	//reduce HBlank Symbols to account for secondary data packet
1714 	hblank_symbols -= 1; //Stuffer latency to send BS
1715 	hblank_symbols -= 3; //SPKT latency to send data to stuffer
1716 
1717 	hblank_symbols -= numLanesPerLink == 1 ? 9  : numLanesPerLink == 2 ? 6 : 3;
1718 
1719 	hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
1720 
1721 	// Refer to dev_disp.ref for more information.
1722 	// # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1;
1723 	// where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
1724 	if (surfaceWidth < 40)
1725 	{
1726 		vblank_symbols = 0;
1727 	}
1728 	else
1729 	{
1730 		vblank_symbols = (s32)((div_u64((u64)(surfaceWidth - 40) * minRate, pixelClockHz))) - 1;
1731 
1732 		vblank_symbols -= numLanesPerLink == 1 ? 39  : numLanesPerLink == 2 ? 21 : 12;
1733 	}
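
	/*
	 * Worked example (illustrative figures only): surfaceWidth = 1920,
	 * pclk = 148.5 MHz, link_rate = 270 MHz, 4 lanes:
	 * (1920 - 40) * 270 / 148.5 ~= 3418 symbols, minus 1 and minus 12
	 * gives vblank_symbols ~= 3405.
	 */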
1734 
1735 	vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
1736 
1737 	return nvif_outp_dp_sst(&outp->outp, head->base.index, waterMark, hBlankSym, vBlankSym);
1738 }
1739 
1740 static void
1741 nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
1742 {
1743 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1744 	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
1745 	struct nv50_head_atom *asyh =
1746 		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
1747 	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
1748 	struct nv50_disp *disp = nv50_disp(encoder->dev);
1749 	struct nv50_head *head = nv50_head(&nv_crtc->base);
1750 	struct nvif_outp *outp = &nv_encoder->outp;
1751 	struct drm_device *dev = encoder->dev;
1752 	struct nouveau_drm *drm = nouveau_drm(dev);
1753 	struct nouveau_connector *nv_connector;
1754 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1755 	struct nouveau_backlight *backlight;
1756 #endif
1757 	struct nvbios *bios = &drm->vbios;
1758 	bool lvds_dual = false, lvds_8bpc = false, hda = false;
1759 	u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
1760 	u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
1761 
1762 	nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
1763 	nv_encoder->crtc = &nv_crtc->base;
1764 
1765 	if ((disp->disp->object.oclass == GT214_DISP ||
1766 	     disp->disp->object.oclass >= GF110_DISP) &&
1767 	    nv_encoder->dcb->type != DCB_OUTPUT_LVDS &&
1768 	    drm_detect_monitor_audio(nv_connector->edid))
1769 		hda = true;
1770 
1771 	if (!nvif_outp_acquired(outp))
1772 		nvif_outp_acquire_sor(outp, hda);
1773 
1774 	switch (nv_encoder->dcb->type) {
1775 	case DCB_OUTPUT_TMDS:
1776 		if (disp->disp->object.oclass != NV50_DISP &&
1777 		    drm_detect_hdmi_monitor(nv_connector->edid))
1778 			nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);
1779 
1780 		if (nv_encoder->outp.or.link & 1) {
1781 			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
1782 			/* Only enable dual-link if:
1783 			 *  - Need to (i.e. rate >= 165MHz)
1784 			 *  - DCB says we can
1785 			 *  - Not an HDMI monitor, since there's no dual-link
1786 			 *    on HDMI.
1787 			 */
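			/* Illustrative example: a 2560x1600@60 mode (~268 MHz
			 * pixel clock) would take the dual-link path on a DVI
			 * setup satisfying the conditions above.
			 */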
1788 			if (mode->clock >= 165000 &&
1789 			    nv_encoder->dcb->duallink_possible &&
1790 			    !drm_detect_hdmi_monitor(nv_connector->edid))
1791 				proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
1792 		} else {
1793 			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
1794 		}
1795 		break;
1796 	case DCB_OUTPUT_LVDS:
1797 		proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
1798 
1799 		if (bios->fp_no_ddc) {
1800 			lvds_dual = bios->fp.dual_link;
1801 			lvds_8bpc = bios->fp.if_is_24bit;
1802 		} else {
1803 			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
1804 				if (((u8 *)nv_connector->edid)[121] == 2)
1805 					lvds_dual = true;
1806 			} else
1807 			if (mode->clock >= bios->fp.duallink_transition_clk) {
1808 				lvds_dual = true;
1809 			}
1810 
1811 			if (lvds_dual) {
1812 				if (bios->fp.strapless_is_24bit & 2)
1813 					lvds_8bpc = true;
1814 			} else {
1815 				if (bios->fp.strapless_is_24bit & 1)
1816 					lvds_8bpc = true;
1817 			}
1818 
1819 			if (asyh->or.bpc == 8)
1820 				lvds_8bpc = true;
1821 		}
1822 
1823 		nvif_outp_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
1824 		break;
1825 	case DCB_OUTPUT_DP:
1826 		nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
1827 		nv50_sor_dp_watermark_sst(nv_encoder, head, asyh);
1828 		depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
1829 
1830 		if (nv_encoder->outp.or.link & 1)
1831 			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
1832 		else
1833 			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
1834 
1835 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1836 		backlight = nv_connector->backlight;
1837 		if (backlight && backlight->uses_dpcd)
1838 			drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
1839 						 (u16)backlight->dev->props.brightness);
1840 #endif
1841 
1842 		break;
1843 	default:
1844 		BUG();
1845 		break;
1846 	}
1847 
1848 	if (head->func->display_id)
1849 		head->func->display_id(head, BIT(nv_encoder->outp.id));
1850 
1851 	nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
1852 }
1853 
1854 static const struct drm_encoder_helper_funcs
1855 nv50_sor_help = {
1856 	.atomic_check = nv50_outp_atomic_check,
1857 	.atomic_enable = nv50_sor_atomic_enable,
1858 	.atomic_disable = nv50_sor_atomic_disable,
1859 };
1860 
1861 static void
1862 nv50_sor_destroy(struct drm_encoder *encoder)
1863 {
1864 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1865 
1866 	nv50_mstm_del(&nv_encoder->dp.mstm);
1867 	drm_encoder_cleanup(encoder);
1868 
1869 	if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
1870 		mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
1871 
1872 	nvif_outp_dtor(&nv_encoder->outp);
1873 	kfree(encoder);
1874 }
1875 
1876 static const struct drm_encoder_funcs
1877 nv50_sor_func = {
1878 	.destroy = nv50_sor_destroy,
1879 };
1880 
1881 static int
1882 nv50_sor_create(struct nouveau_encoder *nv_encoder)
1883 {
1884 	struct drm_connector *connector = &nv_encoder->conn->base;
1885 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
1886 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
1887 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
1888 	struct drm_encoder *encoder;
1889 	struct dcb_output *dcbe = nv_encoder->dcb;
1890 	struct nv50_disp *disp = nv50_disp(connector->dev);
1891 	int type, ret;
1892 
1893 	switch (dcbe->type) {
1894 	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
1895 	case DCB_OUTPUT_TMDS:
1896 	case DCB_OUTPUT_DP:
1897 	default:
1898 		type = DRM_MODE_ENCODER_TMDS;
1899 		break;
1900 	}
1901 
1902 	nv_encoder->update = nv50_sor_update;
1903 
1904 	encoder = to_drm_encoder(nv_encoder);
1905 	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
1906 			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
1907 	drm_encoder_helper_add(encoder, &nv50_sor_help);
1908 
1909 	drm_connector_attach_encoder(connector, encoder);
1910 
1911 	disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
1912 	nv50_outp_dump_caps(drm, nv_encoder);
1913 
1914 	if (dcbe->type == DCB_OUTPUT_DP) {
1915 		mutex_init(&nv_encoder->dp.hpd_irq_lock);
1916 
1917 		if (disp->disp->object.oclass < GF110_DISP) {
1918 			/* HW has no support for address-only
1919 			 * transactions, so we're required to
1920 			 * use custom I2C-over-AUX code.
1921 			 */
1922 			struct nvkm_i2c_aux *aux;
1923 
1924 			aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
1925 			if (!aux)
1926 				return -EINVAL;
1927 
1928 			nv_encoder->i2c = &aux->i2c;
1929 		} else {
1930 			nv_encoder->i2c = &nv_connector->aux.ddc;
1931 		}
1932 
1933 		if (nv_connector->type != DCB_CONNECTOR_eDP && nv_encoder->outp.info.dp.mst) {
1934 			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
1935 					    16, nv_connector->base.base.id,
1936 					    &nv_encoder->dp.mstm);
1937 			if (ret)
1938 				return ret;
1939 		}
1940 	} else
1941 	if (nv_encoder->outp.info.ddc != NVIF_OUTP_DDC_INVALID) {
1942 		struct nvkm_i2c_bus *bus =
1943 			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
1944 		if (bus)
1945 			nv_encoder->i2c = &bus->i2c;
1946 	}
1947 
1948 	return 0;
1949 }
1950 
1951 /******************************************************************************
1952  * PIOR
1953  *****************************************************************************/
1954 static int
1955 nv50_pior_atomic_check(struct drm_encoder *encoder,
1956 		       struct drm_crtc_state *crtc_state,
1957 		       struct drm_connector_state *conn_state)
1958 {
1959 	int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
1960 	if (ret)
1961 		return ret;
1962 	crtc_state->adjusted_mode.clock *= 2;
1963 	return 0;
1964 }
1965 
1966 static void
1967 nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
1968 {
1969 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1970 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
1971 	const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
1972 
1973 	core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
1974 	nv_encoder->crtc = NULL;
1975 }
1976 
1977 static void
1978 nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
1979 {
1980 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1981 	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
1982 	struct nv50_head_atom *asyh =
1983 		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
1984 	struct nv50_core *core = nv50_disp(encoder->dev)->core;
1985 	u32 ctrl = 0;
1986 
1987 	switch (nv_crtc->index) {
1988 	case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
1989 	case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
1990 	default:
1991 		WARN_ON(1);
1992 		break;
1993 	}
1994 
1995 	switch (asyh->or.bpc) {
1996 	case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
1997 	case  8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
1998 	case  6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
1999 	default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
2000 	}
2001 
2002 	if (!nvif_outp_acquired(&nv_encoder->outp))
2003 		nvif_outp_acquire_pior(&nv_encoder->outp);
2004 
2005 	switch (nv_encoder->dcb->type) {
2006 	case DCB_OUTPUT_TMDS:
2007 		ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
2008 		break;
2009 	case DCB_OUTPUT_DP:
2010 		ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
2011 		nouveau_dp_train(nv_encoder, false, asyh->state.adjusted_mode.clock, 6);
2012 		break;
2013 	default:
2014 		BUG();
2015 		break;
2016 	}
2017 
2018 	core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
2019 	nv_encoder->crtc = &nv_crtc->base;
2020 }
2021 
2022 static const struct drm_encoder_helper_funcs
2023 nv50_pior_help = {
2024 	.atomic_check = nv50_pior_atomic_check,
2025 	.atomic_enable = nv50_pior_atomic_enable,
2026 	.atomic_disable = nv50_pior_atomic_disable,
2027 };
2028 
2029 static void
2030 nv50_pior_destroy(struct drm_encoder *encoder)
2031 {
2032 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2033 
2034 	nvif_outp_dtor(&nv_encoder->outp);
2035 
2036 	drm_encoder_cleanup(encoder);
2037 
2038 	mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
2039 	kfree(encoder);
2040 }
2041 
2042 static const struct drm_encoder_funcs
2043 nv50_pior_func = {
2044 	.destroy = nv50_pior_destroy,
2045 };
2046 
2047 static int
2048 nv50_pior_create(struct nouveau_encoder *nv_encoder)
2049 {
2050 	struct drm_connector *connector = &nv_encoder->conn->base;
2051 	struct drm_device *dev = connector->dev;
2052 	struct nouveau_drm *drm = nouveau_drm(dev);
2053 	struct nv50_disp *disp = nv50_disp(dev);
2054 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
2055 	struct nvkm_i2c_bus *bus = NULL;
2056 	struct nvkm_i2c_aux *aux = NULL;
2057 	struct i2c_adapter *ddc;
2058 	struct drm_encoder *encoder;
2059 	struct dcb_output *dcbe = nv_encoder->dcb;
2060 	int type;
2061 
2062 	switch (dcbe->type) {
2063 	case DCB_OUTPUT_TMDS:
2064 		bus  = nvkm_i2c_bus_find(i2c, nv_encoder->outp.info.ddc);
2065 		ddc  = bus ? &bus->i2c : NULL;
2066 		type = DRM_MODE_ENCODER_TMDS;
2067 		break;
2068 	case DCB_OUTPUT_DP:
2069 		aux  = nvkm_i2c_aux_find(i2c, nv_encoder->outp.info.dp.aux);
2070 		ddc  = aux ? &aux->i2c : NULL;
2071 		type = DRM_MODE_ENCODER_TMDS;
2072 		break;
2073 	default:
2074 		return -ENODEV;
2075 	}
2076 
2077 	nv_encoder->i2c = ddc;
2078 
2079 	mutex_init(&nv_encoder->dp.hpd_irq_lock);
2080 
2081 	encoder = to_drm_encoder(nv_encoder);
2082 	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
2083 			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
2084 	drm_encoder_helper_add(encoder, &nv50_pior_help);
2085 
2086 	drm_connector_attach_encoder(connector, encoder);
2087 
2088 	disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
2089 	nv50_outp_dump_caps(drm, nv_encoder);
2090 
2091 	return 0;
2092 }
2093 
2094 /******************************************************************************
2095  * Atomic
2096  *****************************************************************************/
2097 
2098 static void
2099 nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
2100 {
2101 	struct drm_dp_mst_topology_mgr *mgr;
2102 	struct drm_dp_mst_topology_state *mst_state;
2103 	struct nouveau_drm *drm = nouveau_drm(state->dev);
2104 	struct nv50_disp *disp = nv50_disp(drm->dev);
2105 	struct nv50_atom *atom = nv50_atom(state);
2106 	struct nv50_core *core = disp->core;
2107 	struct nv50_outp_atom *outp;
2108 	struct nv50_mstm *mstm;
2109 	int i;
2110 
2111 	NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
2112 
2113 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
2114 		mstm = nv50_mstm(mgr);
2115 		if (mstm->modified)
2116 			nv50_mstm_prepare(state, mst_state, mstm);
2117 	}
2118 
2119 	core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
2120 	core->func->update(core, interlock, true);
2121 	if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
2122 				       disp->core->chan.base.device))
2123 		NV_ERROR(drm, "core notifier timeout\n");
2124 
2125 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
2126 		mstm = nv50_mstm(mgr);
2127 		if (mstm->modified)
2128 			nv50_mstm_cleanup(state, mst_state, mstm);
2129 	}
2130 
2131 	list_for_each_entry(outp, &atom->outp, head) {
2132 		if (outp->encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
2133 			struct nouveau_encoder *nv_encoder = nouveau_encoder(outp->encoder);
2134 
2135 			if (outp->enabled) {
2136 				nv50_audio_enable(outp->encoder, nouveau_crtc(nv_encoder->crtc),
2137 						  nv_encoder->conn, NULL, NULL);
2138 				outp->enabled = outp->disabled = false;
2139 			} else {
2140 				if (outp->disabled) {
2141 					nvif_outp_release(&nv_encoder->outp);
2142 					outp->disabled = false;
2143 				}
2144 			}
2145 		}
2146 	}
2147 }
2148 
2149 static void
2150 nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
2151 {
2152 	struct drm_plane_state *new_plane_state;
2153 	struct drm_plane *plane;
2154 	int i;
2155 
2156 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2157 		struct nv50_wndw *wndw = nv50_wndw(plane);
2158 		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
2159 			if (wndw->func->update)
2160 				wndw->func->update(wndw, interlock);
2161 		}
2162 	}
2163 }
2164 
2165 static void
2166 nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
2167 {
2168 	struct drm_device *dev = state->dev;
2169 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
2170 	struct drm_crtc *crtc;
2171 	struct drm_plane_state *new_plane_state;
2172 	struct drm_plane *plane;
2173 	struct nouveau_drm *drm = nouveau_drm(dev);
2174 	struct nv50_disp *disp = nv50_disp(dev);
2175 	struct nv50_atom *atom = nv50_atom(state);
2176 	struct nv50_core *core = disp->core;
2177 	struct nv50_outp_atom *outp, *outt;
2178 	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
2179 	int i;
2180 	bool flushed = false;
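
	/*
	 * Rough guide to the flow below (a summary, not from the original
	 * comments): interlock[] accumulates per-channel-type interlock masks;
	 * whenever a disable or update touches core channel state, the CORE
	 * slot is marked so a core update is flushed at the right points.
	 */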
2181 
2182 	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
2183 	nv50_crc_atomic_stop_reporting(state);
2184 	drm_atomic_helper_wait_for_fences(dev, state, false);
2185 	drm_atomic_helper_wait_for_dependencies(state);
2186 	drm_dp_mst_atomic_wait_for_dependencies(state);
2187 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
2188 	drm_atomic_helper_calc_timestamping_constants(state);
2189 
2190 	if (atom->lock_core)
2191 		mutex_lock(&disp->mutex);
2192 
2193 	/* Disable head(s). */
2194 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2195 		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
2196 		struct nv50_head *head = nv50_head(crtc);
2197 
2198 		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
2199 			  asyh->clr.mask, asyh->set.mask);
2200 
2201 		if (old_crtc_state->active && !new_crtc_state->active) {
2202 			pm_runtime_put_noidle(dev->dev);
2203 			drm_crtc_vblank_off(crtc);
2204 		}
2205 
2206 		if (asyh->clr.mask) {
2207 			nv50_head_flush_clr(head, asyh, atom->flush_disable);
2208 			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
2209 		}
2210 	}
2211 
2212 	/* Disable plane(s). */
2213 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2214 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
2215 		struct nv50_wndw *wndw = nv50_wndw(plane);
2216 
2217 		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
2218 			  asyw->clr.mask, asyw->set.mask);
2219 		if (!asyw->clr.mask)
2220 			continue;
2221 
2222 		nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
2223 	}
2224 
2225 	/* Disable output path(s). */
2226 	list_for_each_entry(outp, &atom->outp, head) {
2227 		const struct drm_encoder_helper_funcs *help;
2228 		struct drm_encoder *encoder;
2229 
2230 		encoder = outp->encoder;
2231 		help = encoder->helper_private;
2232 
2233 		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
2234 			  outp->clr.mask, outp->set.mask);
2235 
2236 		if (outp->clr.mask) {
2237 			help->atomic_disable(encoder, state);
2238 			outp->disabled = true;
2239 			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
2240 		}
2241 	}
2242 
2243 	/* Flush disable. */
2244 	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
2245 		if (atom->flush_disable) {
2246 			nv50_disp_atomic_commit_wndw(state, interlock);
2247 			nv50_disp_atomic_commit_core(state, interlock);
2248 			memset(interlock, 0x00, sizeof(interlock));
2249 
2250 			flushed = true;
2251 		}
2252 	}
2253 
2254 	if (flushed)
2255 		nv50_crc_atomic_release_notifier_contexts(state);
2256 	nv50_crc_atomic_init_notifier_contexts(state);
2257 
2258 	/* Update output path(s). */
2259 	list_for_each_entry(outp, &atom->outp, head) {
2260 		const struct drm_encoder_helper_funcs *help;
2261 		struct drm_encoder *encoder;
2262 
2263 		encoder = outp->encoder;
2264 		help = encoder->helper_private;
2265 
2266 		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
2267 			  outp->set.mask, outp->clr.mask);
2268 
2269 		if (outp->set.mask) {
2270 			help->atomic_enable(encoder, state);
2271 			outp->enabled = true;
2272 			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
2273 		}
2274 	}
2275 
2276 	/* Update head(s). */
2277 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2278 		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
2279 		struct nv50_head *head = nv50_head(crtc);
2280 
2281 		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
2282 			  asyh->set.mask, asyh->clr.mask);
2283 
2284 		if (asyh->set.mask) {
2285 			nv50_head_flush_set(head, asyh);
2286 			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
2287 		}
2288 
2289 		if (new_crtc_state->active) {
2290 			if (!old_crtc_state->active) {
2291 				drm_crtc_vblank_on(crtc);
2292 				pm_runtime_get_noresume(dev->dev);
2293 			}
2294 			if (new_crtc_state->event)
2295 				drm_crtc_vblank_get(crtc);
2296 		}
2297 	}
2298 
2299 	/* Update window->head assignment.
2300 	 *
2301 	 * This has to happen in an update that's not interlocked with
2302 	 * any window channels to avoid hitting HW error checks.
2303 	 *
2304 	 * TODO: Proper handling of window ownership (Turing apparently
2305 	 *       supports non-fixed mappings).
2306 	 */
2307 	if (core->assign_windows) {
2308 		core->func->wndw.owner(core);
2309 		nv50_disp_atomic_commit_core(state, interlock);
2310 		core->assign_windows = false;
2311 		interlock[NV50_DISP_INTERLOCK_CORE] = 0;
2312 	}
2313 
2314 	/* Finish updating head(s)...
2315 	 *
2316 	 * NVD is rather picky about both where window assignments can change,
2317 	 * *and* about certain core and window channel states matching.
2318 	 *
2319 	 * The EFI GOP driver on newer GPUs configures window channels with a
2320 	 * different output format to what we do, and the core channel update
2321 	 * in the assign_windows case above would result in a state mismatch.
2322 	 *
2323 	 * Delay some of the head update until after that point to work around
2324 	 * the issue.  This only affects the initial modeset.
2325 	 *
2326 	 * TODO: handle this better when adding flexible window mapping
2327 	 */
2328 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2329 		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
2330 		struct nv50_head *head = nv50_head(crtc);
2331 
2332 		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
2333 			  asyh->set.mask, asyh->clr.mask);
2334 
2335 		if (asyh->set.mask) {
2336 			nv50_head_flush_set_wndw(head, asyh);
2337 			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
2338 		}
2339 	}
2340 
2341 	/* Update plane(s). */
2342 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2343 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
2344 		struct nv50_wndw *wndw = nv50_wndw(plane);
2345 
2346 		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
2347 			  asyw->set.mask, asyw->clr.mask);
2348 		if ( !asyw->set.mask &&
2349 		    (!asyw->clr.mask || atom->flush_disable))
2350 			continue;
2351 
2352 		nv50_wndw_flush_set(wndw, interlock, asyw);
2353 	}
2354 
2355 	/* Flush update. */
2356 	nv50_disp_atomic_commit_wndw(state, interlock);
2357 
2358 	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
2359 		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
2360 		    interlock[NV50_DISP_INTERLOCK_OVLY] ||
2361 		    interlock[NV50_DISP_INTERLOCK_WNDW] ||
2362 		    !atom->state.legacy_cursor_update)
2363 			nv50_disp_atomic_commit_core(state, interlock);
2364 		else
2365 			disp->core->func->update(disp->core, interlock, false);
2366 	}
2367 
2368 	if (atom->lock_core)
2369 		mutex_unlock(&disp->mutex);
2370 
2371 	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
2372 		list_del(&outp->head);
2373 		kfree(outp);
2374 	}
2375 
2376 	/* Wait for HW to signal completion. */
2377 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2378 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
2379 		struct nv50_wndw *wndw = nv50_wndw(plane);
2380 		int ret = nv50_wndw_wait_armed(wndw, asyw);
2381 		if (ret)
2382 			NV_ERROR(drm, "%s: timeout\n", plane->name);
2383 	}
2384 
2385 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2386 		if (new_crtc_state->event) {
2387 			unsigned long flags;
2388 			/* Get correct count/ts if racing with vblank irq */
2389 			if (new_crtc_state->active)
2390 				drm_crtc_accurate_vblank_count(crtc);
2391 			spin_lock_irqsave(&crtc->dev->event_lock, flags);
2392 			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
2393 			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
2394 
2395 			new_crtc_state->event = NULL;
2396 			if (new_crtc_state->active)
2397 				drm_crtc_vblank_put(crtc);
2398 		}
2399 	}
2400 
2401 	nv50_crc_atomic_start_reporting(state);
2402 	if (!flushed)
2403 		nv50_crc_atomic_release_notifier_contexts(state);
2404 
2405 	drm_atomic_helper_commit_hw_done(state);
2406 	drm_atomic_helper_cleanup_planes(dev, state);
2407 	drm_atomic_helper_commit_cleanup_done(state);
2408 	drm_atomic_state_put(state);
2409 
2410 	/* Drop the RPM ref we got from nv50_disp_atomic_commit() */
2411 	pm_runtime_mark_last_busy(dev->dev);
2412 	pm_runtime_put_autosuspend(dev->dev);
2413 }
2414 
2415 static void
2416 nv50_disp_atomic_commit_work(struct work_struct *work)
2417 {
2418 	struct drm_atomic_state *state =
2419 		container_of(work, typeof(*state), commit_work);
2420 	nv50_disp_atomic_commit_tail(state);
2421 }
2422 
2423 static int
2424 nv50_disp_atomic_commit(struct drm_device *dev,
2425 			struct drm_atomic_state *state, bool nonblock)
2426 {
2427 	struct drm_plane_state *new_plane_state;
2428 	struct drm_plane *plane;
2429 	int ret, i;
2430 
2431 	ret = pm_runtime_get_sync(dev->dev);
2432 	if (ret < 0 && ret != -EACCES) {
2433 		pm_runtime_put_autosuspend(dev->dev);
2434 		return ret;
2435 	}
2436 
2437 	ret = drm_atomic_helper_setup_commit(state, nonblock);
2438 	if (ret)
2439 		goto done;
2440 
2441 	INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
2442 
2443 	ret = drm_atomic_helper_prepare_planes(dev, state);
2444 	if (ret)
2445 		goto done;
2446 
2447 	if (!nonblock) {
2448 		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
2449 		if (ret)
2450 			goto err_cleanup;
2451 	}
2452 
2453 	ret = drm_atomic_helper_swap_state(state, true);
2454 	if (ret)
2455 		goto err_cleanup;
2456 
2457 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2458 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
2459 		struct nv50_wndw *wndw = nv50_wndw(plane);
2460 
2461 		if (asyw->set.image)
2462 			nv50_wndw_ntfy_enable(wndw, asyw);
2463 	}
2464 
2465 	drm_atomic_state_get(state);
2466 
2467 	/*
2468 	 * Grab another RPM ref for the commit tail, which will release the
2469 	 * ref when it's finished
2470 	 */
2471 	pm_runtime_get_noresume(dev->dev);
2472 
2473 	if (nonblock)
2474 		queue_work(system_unbound_wq, &state->commit_work);
2475 	else
2476 		nv50_disp_atomic_commit_tail(state);
2477 
2478 err_cleanup:
2479 	if (ret)
2480 		drm_atomic_helper_unprepare_planes(dev, state);
2481 done:
2482 	pm_runtime_put_autosuspend(dev->dev);
2483 	return ret;
2484 }
2485 
2486 static struct nv50_outp_atom *
2487 nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
2488 {
2489 	struct nv50_outp_atom *outp;
2490 
2491 	list_for_each_entry(outp, &atom->outp, head) {
2492 		if (outp->encoder == encoder)
2493 			return outp;
2494 	}
2495 
2496 	outp = kzalloc(sizeof(*outp), GFP_KERNEL);
2497 	if (!outp)
2498 		return ERR_PTR(-ENOMEM);
2499 
2500 	list_add(&outp->head, &atom->outp);
2501 	outp->encoder = encoder;
2502 	return outp;
2503 }
2504 
2505 static int
2506 nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
2507 				struct drm_connector_state *old_connector_state)
2508 {
2509 	struct drm_encoder *encoder = old_connector_state->best_encoder;
2510 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2511 	struct drm_crtc *crtc;
2512 	struct nv50_outp_atom *outp;
2513 
2514 	if (!(crtc = old_connector_state->crtc))
2515 		return 0;
2516 
2517 	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
2518 	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
2519 	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
2520 		outp = nv50_disp_outp_atomic_add(atom, encoder);
2521 		if (IS_ERR(outp))
2522 			return PTR_ERR(outp);
2523 
2524 		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST ||
2525 		    nouveau_encoder(outp->encoder)->dcb->type == DCB_OUTPUT_DP)
2526 			atom->flush_disable = true;
2527 		outp->clr.ctrl = true;
2528 		atom->lock_core = true;
2529 	}
2530 
2531 	return 0;
2532 }
2533 
2534 static int
2535 nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
2536 				struct drm_connector_state *connector_state)
2537 {
2538 	struct drm_encoder *encoder = connector_state->best_encoder;
2539 	struct drm_crtc_state *new_crtc_state;
2540 	struct drm_crtc *crtc;
2541 	struct nv50_outp_atom *outp;
2542 
2543 	if (!(crtc = connector_state->crtc))
2544 		return 0;
2545 
2546 	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
2547 	if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
2548 		outp = nv50_disp_outp_atomic_add(atom, encoder);
2549 		if (IS_ERR(outp))
2550 			return PTR_ERR(outp);
2551 
2552 		outp->set.ctrl = true;
2553 		atom->lock_core = true;
2554 	}
2555 
2556 	return 0;
2557 }
2558 
2559 static int
2560 nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
2561 {
2562 	struct nv50_atom *atom = nv50_atom(state);
2563 	struct nv50_core *core = nv50_disp(dev)->core;
2564 	struct drm_connector_state *old_connector_state, *new_connector_state;
2565 	struct drm_connector *connector;
2566 	struct drm_crtc_state *new_crtc_state;
2567 	struct drm_crtc *crtc;
2568 	struct nv50_head *head;
2569 	struct nv50_head_atom *asyh;
2570 	int ret, i;
2571 
2572 	if (core->assign_windows && core->func->head->static_wndw_map) {
2573 		drm_for_each_crtc(crtc, dev) {
2574 			new_crtc_state = drm_atomic_get_crtc_state(state,
2575 								   crtc);
2576 			if (IS_ERR(new_crtc_state))
2577 				return PTR_ERR(new_crtc_state);
2578 
2579 			head = nv50_head(crtc);
2580 			asyh = nv50_head_atom(new_crtc_state);
2581 			core->func->head->static_wndw_map(head, asyh);
2582 		}
2583 	}
2584 
2585 	/* We need to handle colour management on a per-plane basis. */
2586 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2587 		if (new_crtc_state->color_mgmt_changed) {
2588 			ret = drm_atomic_add_affected_planes(state, crtc);
2589 			if (ret)
2590 				return ret;
2591 		}
2592 	}
2593 
2594 	ret = drm_atomic_helper_check(dev, state);
2595 	if (ret)
2596 		return ret;
2597 
2598 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
2599 		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
2600 		if (ret)
2601 			return ret;
2602 
2603 		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
2604 		if (ret)
2605 			return ret;
2606 	}
2607 
2608 	ret = drm_dp_mst_atomic_check(state);
2609 	if (ret)
2610 		return ret;
2611 
2612 	nv50_crc_atomic_check_outp(atom);
2613 
2614 	return 0;
2615 }
2616 
2617 static void
2618 nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
2619 {
2620 	struct nv50_atom *atom = nv50_atom(state);
2621 	struct nv50_outp_atom *outp, *outt;
2622 
2623 	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
2624 		list_del(&outp->head);
2625 		kfree(outp);
2626 	}
2627 
2628 	drm_atomic_state_default_clear(state);
2629 }
2630 
2631 static void
2632 nv50_disp_atomic_state_free(struct drm_atomic_state *state)
2633 {
2634 	struct nv50_atom *atom = nv50_atom(state);
2635 	drm_atomic_state_default_release(&atom->state);
2636 	kfree(atom);
2637 }
2638 
2639 static struct drm_atomic_state *
2640 nv50_disp_atomic_state_alloc(struct drm_device *dev)
2641 {
2642 	struct nv50_atom *atom;
2643 	if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
2644 	    drm_atomic_state_init(dev, &atom->state) < 0) {
2645 		kfree(atom);
2646 		return NULL;
2647 	}
2648 	INIT_LIST_HEAD(&atom->outp);
2649 	return &atom->state;
2650 }
2651 
2652 static const struct drm_mode_config_funcs
2653 nv50_disp_func = {
2654 	.fb_create = nouveau_user_framebuffer_create,
2655 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2656 	.atomic_check = nv50_disp_atomic_check,
2657 	.atomic_commit = nv50_disp_atomic_commit,
2658 	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
2659 	.atomic_state_clear = nv50_disp_atomic_state_clear,
2660 	.atomic_state_free = nv50_disp_atomic_state_free,
2661 };
2662 
2663 static const struct drm_mode_config_helper_funcs
2664 nv50_disp_helper_func = {
2665 	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
2666 };
2667 
2668 /******************************************************************************
2669  * Init
2670  *****************************************************************************/
2671 
2672 static void
2673 nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
2674 {
2675 	struct nouveau_drm *drm = nouveau_drm(dev);
2676 	struct drm_encoder *encoder;
2677 
2678 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2679 		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
2680 			nv50_mstm_fini(nouveau_encoder(encoder));
2681 	}
2682 
2683 	if (!runtime)
2684 		cancel_work_sync(&drm->hpd_work);
2685 }
2686 
2687 static inline void
2688 nv50_display_read_hw_or_state(struct drm_device *dev, struct nv50_disp *disp,
2689 			      struct nouveau_encoder *outp)
2690 {
2691 	struct drm_crtc *crtc;
2692 	struct drm_connector_list_iter conn_iter;
2693 	struct drm_connector *conn;
2694 	struct nv50_head_atom *armh;
2695 	const u32 encoder_mask = drm_encoder_mask(&outp->base.base);
2696 	bool found_conn = false, found_head = false;
2697 	u8 proto;
2698 	int head_idx;
2699 	int ret;
2700 
2701 	switch (outp->dcb->type) {
2702 	case DCB_OUTPUT_TMDS:
2703 		ret = nvif_outp_inherit_tmds(&outp->outp, &proto);
2704 		break;
2705 	case DCB_OUTPUT_DP:
2706 		ret = nvif_outp_inherit_dp(&outp->outp, &proto);
2707 		break;
2708 	case DCB_OUTPUT_LVDS:
2709 		ret = nvif_outp_inherit_lvds(&outp->outp, &proto);
2710 		break;
2711 	case DCB_OUTPUT_ANALOG:
2712 		ret = nvif_outp_inherit_rgb_crt(&outp->outp, &proto);
2713 		break;
2714 	default:
2715 		drm_dbg_kms(dev, "Readback for %s not implemented yet, skipping\n",
2716 			    outp->base.base.name);
2717 		drm_WARN_ON(dev, true);
2718 		return;
2719 	}
2720 
2721 	if (ret < 0)
2722 		return;
2723 
2724 	head_idx = ret;
2725 
2726 	drm_for_each_crtc(crtc, dev) {
2727 		if (crtc->index != head_idx)
2728 			continue;
2729 
2730 		armh = nv50_head_atom(crtc->state);
2731 		found_head = true;
2732 		break;
2733 	}
2734 	if (drm_WARN_ON(dev, !found_head))
2735 		return;
2736 
2737 	/* Figure out which connector is being used by this encoder */
2738 	drm_connector_list_iter_begin(dev, &conn_iter);
2739 	nouveau_for_each_non_mst_connector_iter(conn, &conn_iter) {
2740 		if (nouveau_connector(conn)->index == outp->dcb->connector) {
2741 			found_conn = true;
2742 			break;
2743 		}
2744 	}
2745 	drm_connector_list_iter_end(&conn_iter);
2746 	if (drm_WARN_ON(dev, !found_conn))
2747 		return;
2748 
2749 	armh->state.encoder_mask = encoder_mask;
2750 	armh->state.connector_mask = drm_connector_mask(conn);
2751 	armh->state.active = true;
2752 	armh->state.enable = true;
2753 	pm_runtime_get_noresume(dev->dev);
2754 
2755 	outp->crtc = crtc;
2756 	outp->ctrl = NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto) | BIT(crtc->index);
2757 
2758 	drm_connector_get(conn);
2759 	conn->state->crtc = crtc;
2760 	conn->state->best_encoder = &outp->base.base;
2761 }
2762 
2763 /* Read back the currently programmed display state */
2764 static void
2765 nv50_display_read_hw_state(struct nouveau_drm *drm)
2766 {
2767 	struct drm_device *dev = drm->dev;
2768 	struct drm_encoder *encoder;
2769 	struct drm_modeset_acquire_ctx ctx;
2770 	struct nv50_disp *disp = nv50_disp(dev);
2771 	int ret;
2772 
2773 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
2774 
2775 	drm_for_each_encoder(encoder, dev) {
2776 		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
2777 			continue;
2778 
2779 		nv50_display_read_hw_or_state(dev, disp, nouveau_encoder(encoder));
2780 	}
2781 
2782 	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
2783 }
2784 
2785 static int
2786 nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
2787 {
2788 	struct nv50_core *core = nv50_disp(dev)->core;
2789 	struct drm_encoder *encoder;
2790 
2791 	if (resume || runtime)
2792 		core->func->init(core);
2793 
2794 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2795 		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
2796 			struct nouveau_encoder *nv_encoder =
2797 				nouveau_encoder(encoder);
2798 			nv50_mstm_init(nv_encoder, runtime);
2799 		}
2800 	}
2801 
2802 	if (!resume)
2803 		nv50_display_read_hw_state(nouveau_drm(dev));
2804 
2805 	return 0;
2806 }
2807 
2808 static void
2809 nv50_display_destroy(struct drm_device *dev)
2810 {
2811 	struct nv50_disp *disp = nv50_disp(dev);
2812 
2813 	nv50_audio_component_fini(nouveau_drm(dev));
2814 
2815 	nvif_object_unmap(&disp->caps);
2816 	nvif_object_dtor(&disp->caps);
2817 	nv50_core_del(&disp->core);
2818 
2819 	nouveau_bo_unmap(disp->sync);
2820 	if (disp->sync)
2821 		nouveau_bo_unpin(disp->sync);
2822 	nouveau_bo_ref(NULL, &disp->sync);
2823 
2824 	nouveau_display(dev)->priv = NULL;
2825 	kfree(disp);
2826 }
2827 
2828 int
2829 nv50_display_create(struct drm_device *dev)
2830 {
2831 	struct nouveau_drm *drm = nouveau_drm(dev);
2832 	struct drm_connector *connector, *tmp;
2833 	struct nv50_disp *disp;
2834 	int ret, i;
2835 	bool has_mst = false;
2836 
2837 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
2838 	if (!disp)
2839 		return -ENOMEM;
2840 
2841 	mutex_init(&disp->mutex);
2842 
2843 	nouveau_display(dev)->priv = disp;
2844 	nouveau_display(dev)->dtor = nv50_display_destroy;
2845 	nouveau_display(dev)->init = nv50_display_init;
2846 	nouveau_display(dev)->fini = nv50_display_fini;
2847 	disp->disp = &nouveau_display(dev)->disp;
2848 	dev->mode_config.funcs = &nv50_disp_func;
2849 	dev->mode_config.helper_private = &nv50_disp_helper_func;
2850 	dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
2851 	dev->mode_config.normalize_zpos = true;
2852 
2853 	/* small shared memory area we use for notifiers and semaphores */
2854 	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
2855 			     NOUVEAU_GEM_DOMAIN_VRAM,
2856 			     0, 0x0000, NULL, NULL, &disp->sync);
2857 	if (!ret) {
2858 		ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
2859 		if (!ret) {
2860 			ret = nouveau_bo_map(disp->sync);
2861 			if (ret)
2862 				nouveau_bo_unpin(disp->sync);
2863 		}
2864 		if (ret)
2865 			nouveau_bo_ref(NULL, &disp->sync);
2866 	}
2867 
2868 	if (ret)
2869 		goto out;
2870 
2871 	/* allocate master evo channel */
2872 	ret = nv50_core_new(drm, &disp->core);
2873 	if (ret)
2874 		goto out;
2875 
2876 	disp->core->func->init(disp->core);
2877 	if (disp->core->func->caps_init) {
2878 		ret = disp->core->func->caps_init(drm, disp);
2879 		if (ret)
2880 			goto out;
2881 	}
2882 
2883 	/* Assign the correct format modifiers */
2884 	if (disp->disp->object.oclass >= TU102_DISP)
2885 		nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
2886 	else
2887 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
2888 		nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
2889 	else
2890 		nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
2891 
2892 	/* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later
2893 	 * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
2894 	 * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
2895 	 * small page allocations in prepare_fb(). When this is implemented, we should also force
2896 	 * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
2897 	 * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
2898 	 * large pages.
2899 	 */
2900 	if (disp->disp->object.oclass >= GM107_DISP) {
2901 		dev->mode_config.cursor_width = 256;
2902 		dev->mode_config.cursor_height = 256;
2903 	} else if (disp->disp->object.oclass >= GK104_DISP) {
2904 		dev->mode_config.cursor_width = 128;
2905 		dev->mode_config.cursor_height = 128;
2906 	} else {
2907 		dev->mode_config.cursor_width = 64;
2908 		dev->mode_config.cursor_height = 64;
2909 	}
2910 
2911 	/* create encoder/connector objects based on VBIOS DCB table */
2912 	for_each_set_bit(i, &disp->disp->outp_mask, sizeof(disp->disp->outp_mask) * 8) {
2913 		struct nouveau_encoder *outp;
2914 
2915 		outp = kzalloc(sizeof(*outp), GFP_KERNEL);
2916 		if (!outp)
2917 			break;
2918 
2919 		ret = nvif_outp_ctor(disp->disp, "kmsOutp", i, &outp->outp);
2920 		if (ret) {
2921 			kfree(outp);
2922 			continue;
2923 		}
2924 
2925 		connector = nouveau_connector_create(dev, outp->outp.info.conn);
2926 		if (IS_ERR(connector)) {
2927 			nvif_outp_dtor(&outp->outp);
2928 			kfree(outp);
2929 			continue;
2930 		}
2931 
2932 		outp->base.base.possible_crtcs = outp->outp.info.heads;
2933 		outp->base.base.possible_clones = 0;
2934 		outp->conn = nouveau_connector(connector);
2935 
2936 		outp->dcb = kzalloc(sizeof(*outp->dcb), GFP_KERNEL);
2937 		if (!outp->dcb)
2938 			break;
2939 
2940 		switch (outp->outp.info.proto) {
2941 		case NVIF_OUTP_RGB_CRT:
2942 			outp->dcb->type = DCB_OUTPUT_ANALOG;
2943 			outp->dcb->crtconf.maxfreq = outp->outp.info.rgb_crt.freq_max;
2944 			break;
2945 		case NVIF_OUTP_TMDS:
2946 			outp->dcb->type = DCB_OUTPUT_TMDS;
2947 			outp->dcb->duallink_possible = outp->outp.info.tmds.dual;
2948 			break;
2949 		case NVIF_OUTP_LVDS:
2950 			outp->dcb->type = DCB_OUTPUT_LVDS;
2951 			outp->dcb->lvdsconf.use_acpi_for_edid = outp->outp.info.lvds.acpi_edid;
2952 			break;
2953 		case NVIF_OUTP_DP:
2954 			outp->dcb->type = DCB_OUTPUT_DP;
2955 			outp->dcb->dpconf.link_nr = outp->outp.info.dp.link_nr;
2956 			outp->dcb->dpconf.link_bw = outp->outp.info.dp.link_bw;
2957 			if (outp->outp.info.dp.mst)
2958 				has_mst = true;
2959 			break;
2960 		default:
2961 			WARN_ON(1);
2962 			continue;
2963 		}
2964 
2965 		outp->dcb->heads = outp->outp.info.heads;
2966 		outp->dcb->connector = outp->outp.info.conn;
2967 		outp->dcb->i2c_index = outp->outp.info.ddc;
2968 
2969 		switch (outp->outp.info.type) {
2970 		case NVIF_OUTP_DAC : ret = nv50_dac_create(outp); break;
2971 		case NVIF_OUTP_SOR : ret = nv50_sor_create(outp); break;
2972 		case NVIF_OUTP_PIOR: ret = nv50_pior_create(outp); break;
2973 		default:
2974 			WARN_ON(1);
2975 			continue;
2976 		}
2977 
2978 		if (ret) {
2979 			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
2980 				i, outp->outp.info.type, outp->outp.info.proto, ret);
2981 		}
2982 	}
2983 
2984 	/* cull any connectors we created that don't have an encoder */
2985 	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
2986 		if (connector->possible_encoders)
2987 			continue;
2988 
2989 		NV_WARN(drm, "%s has no encoders, removing\n",
2990 			connector->name);
2991 		connector->funcs->destroy(connector);
2992 	}
2993 
2994 	/* create crtc objects to represent the hw heads */
2995 	for_each_set_bit(i, &disp->disp->head_mask, sizeof(disp->disp->head_mask) * 8) {
2996 		struct nv50_head *head;
2997 
2998 		head = nv50_head_create(dev, i);
2999 		if (IS_ERR(head)) {
3000 			ret = PTR_ERR(head);
3001 			goto out;
3002 		}
3003 
3004 		if (has_mst) {
3005 			head->msto = nv50_msto_new(dev, head, i);
3006 			if (IS_ERR(head->msto)) {
3007 				ret = PTR_ERR(head->msto);
3008 				head->msto = NULL;
3009 				goto out;
3010 			}
3011 
3012 			/*
3013 			 * FIXME: This is a hack to workaround the following
3014 			 * issues:
3015 			 *
3016 			 * https://gitlab.gnome.org/GNOME/mutter/issues/759
3017 			 * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
3018 			 *
3019 			 * Once these issues are closed, this should be
3020 			 * removed
3021 			 */
3022 			head->msto->encoder.possible_crtcs = disp->disp->head_mask;
3023 		}
3024 	}
3025 
3026 	/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
3027 	dev->vblank_disable_immediate = true;
3028 
3029 	nv50_audio_component_init(drm);
3030 
3031 out:
3032 	if (ret)
3033 		nv50_display_destroy(dev);
3034 	return ret;
3035 }
3036 
3037 /******************************************************************************
3038  * Format modifiers
3039  *****************************************************************************/
3040 
3041 /****************************************************************
3042  *            Log2(block height) ----------------------------+  *
3043  *            Page Kind ----------------------------------+  |  *
3044  *            Gob Height/Page Kind Generation ------+     |  |  *
3045  *                          Sector layout -------+  |     |  |  *
3046  *                          Compression ------+  |  |     |  |  */
3047 const u64 disp50xx_modifiers[] = { /*         |  |  |     |  |  */
3048 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
3049 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
3050 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
3051 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
3052 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
3053 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
3054 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
3055 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
3056 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
3057 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
3058 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
3059 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
3060 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
3061 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
3062 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
3063 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
3064 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
3065 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
3066 	DRM_FORMAT_MOD_LINEAR,
3067 	DRM_FORMAT_MOD_INVALID
3068 };
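
/*
 * Reading one entry against the legend above (illustrative): the first
 * modifier, (0, 1, 1, 0x7a, 0), encodes no compression, sector layout 1,
 * gob-height/page-kind generation 1, page kind 0x7a and a block height of
 * 2^0 = 1 GOB; the six entries per page kind vary only log2(block height),
 * from 0 to 5.
 */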
3069 
3070 /****************************************************************
3071  *            Log2(block height) ----------------------------+  *
3072  *            Page Kind ----------------------------------+  |  *
3073  *            Gob Height/Page Kind Generation ------+     |  |  *
3074  *                          Sector layout -------+  |     |  |  *
3075  *                          Compression ------+  |  |     |  |  */
3076 const u64 disp90xx_modifiers[] = { /*         |  |  |     |  |  */
3077 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
3078 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
3079 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
3080 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
3081 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
3082 	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
3083 	DRM_FORMAT_MOD_LINEAR,
3084 	DRM_FORMAT_MOD_INVALID
3085 };
3086