/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <drm/amdgpu_drm.h>
#include <drm/display/drm_dp_helper.h>

#include "amdgpu.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_atombios.h"

/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE

static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};

/***** amdgpu AUX functions *****/

union aux_channel_transaction {
	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};
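/*
 * Run a DP AUX transaction through the AtomBIOS
 * ProcessAuxChannelTransaction command table: the request bytes are
 * copied into the ATOM scratch area, the table is executed, and the
 * reply status/data are read back.  Returns the number of bytes the
 * table reports as received (clamped to recv_size), or a negative
 * error code (-ETIMEDOUT/-EIO) depending on ucReplyStatus.
 */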
static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
				      u8 *send, int send_bytes,
				      u8 *recv, int recv_size,
				      u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);

	base = (unsigned char *)(adev->mode_info.atom_context->scratch + 1);

	amdgpu_atombios_copy_swap(base, send, send_bytes, true);

	args.v2.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
	args.v2.lpDataOut = cpu_to_le16((u16)(16 + 4));
	args.v2.ucDataOutLen = 0;
	args.v2.ucChannelID = chan->rec.i2c_id;
	args.v2.ucDelay = delay / 10;
	args.v2.ucHPD_ID = chan->rec.hpd;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v2.ucReplyStatus;

	/* timeout */
	if (args.v2.ucReplyStatus == 1) {
		r = -ETIMEDOUT;
		goto done;
	}

	/* flags not zero */
	if (args.v2.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		r = -EIO;
		goto done;
	}

	/* error */
	if (args.v2.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		r = -EIO;
		goto done;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		amdgpu_atombios_copy_swap(recv, base + 16, recv_bytes, false);

	r = recv_bytes;
done:
	mutex_unlock(&chan->mutex);

	return r;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
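/*
 * drm_dp_aux .transfer() hook: builds the 4-byte AUX header (20-bit
 * address, request type, length) in tx_buf[] and forwards the
 * transaction to amdgpu_atombios_dp_process_aux_ch().  msg->reply is
 * taken from the upper nibble of the ATOM ack byte.
 */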
static ssize_t
amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct amdgpu_i2c_chan *chan =
		container_of(aux, struct amdgpu_i2c_chan, aux);
	int ret;
	u8 tx_buf[20];
	size_t tx_size;
	u8 ack, delay = 0;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	tx_buf[0] = msg->address & 0xff;
	tx_buf[1] = msg->address >> 8;
	tx_buf[2] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	tx_buf[3] = msg->size ? (msg->size - 1) : 0;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* tx_size needs to be 4 even for bare address packets since the atom
		 * table needs the info in tx_buf[3].
		 */
		tx_size = HEADER_SIZE + msg->size;
		if (msg->size == 0)
			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
		else
			tx_buf[3] |= tx_size << 4;
		memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
		ret = amdgpu_atombios_dp_process_aux_ch(chan,
						 tx_buf, tx_size, NULL, 0, delay, &ack);
		if (ret >= 0)
			/* Return payload size. */
			ret = msg->size;
		break;
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		/* tx_size needs to be 4 even for bare address packets since the atom
		 * table needs the info in tx_buf[3].
		 */
		tx_size = HEADER_SIZE;
		if (msg->size == 0)
			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
		else
			tx_buf[3] |= tx_size << 4;
		ret = amdgpu_atombios_dp_process_aux_ch(chan,
						 tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret >= 0)
		msg->reply = ack >> 4;

	return ret;
}
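/*
 * Wire the connector's DDC bus up as a DP AUX channel: set the HPD pin
 * and transfer callback, then initialize the drm_dp_aux helper.
 */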
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
	amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
	amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
	amdgpu_connector->ddc_bus->aux.drm_dev = amdgpu_connector->base.dev;

	drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
	amdgpu_connector->ddc_bus->has_aux = true;
}

/***** general DP utility functions *****/

#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPH_LEVEL_3
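/*
 * Derive the next voltage-swing/pre-emphasis values from the link
 * status reported by the sink: take the maximum requested across all
 * active lanes, flag when the maximum supported level is reached, and
 * apply the same setting to every lane.
 */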
static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
						int lane_count,
						u8 train_set[4])
{
	u8 v = 0;
	u8 p = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++) {
		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
			  lane,
			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	if (v >= DP_VOLTAGE_MAX)
		v |= DP_TRAIN_MAX_SWING_REACHED;

	if (p >= DP_PRE_EMPHASIS_MAX)
		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

	for (lane = 0; lane < 4; lane++)
		train_set[lane] = v | p;
}

/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
	if (bpc == 0)
		return 24;
	else
		return bpc * 3;
}

/***** amdgpu specific DP functions *****/
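/*
 * Pick the lowest link rate and lane count that provide enough
 * bandwidth for the requested pixel clock at the monitor's bpc.
 * NUTMEG DP bridges only run at a fixed 2.70 Gbps link rate, so only
 * the lane count is varied for them.
 */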
static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
						 const u8 dpcd[DP_DPCD_SIZE],
						 unsigned pix_clock,
						 unsigned *dp_lanes, unsigned *dp_rate)
{
	unsigned bpp =
		amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
	static const unsigned link_rates[3] = { 162000, 270000, 540000 };
	unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
	unsigned lane_num, i, max_pix_clock;

	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
	    ENCODER_OBJECT_ID_NUTMEG) {
		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
			max_pix_clock = (lane_num * 270000 * 8) / bpp;
			if (max_pix_clock >= pix_clock) {
				*dp_lanes = lane_num;
				*dp_rate = 270000;
				return 0;
			}
		}
	} else {
		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
				if (max_pix_clock >= pix_clock) {
					*dp_lanes = lane_num;
					*dp_rate = link_rates[i];
					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
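/*
 * Execute the AtomBIOS DPEncoderService command table and return its
 * status byte; used below to query the sink type on the connector's
 * AUX/DDC line.
 */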
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
				      int action, int dp_clock,
				      u8 ucconfig, u8 lane_num)
{
	DP_ENCODER_SERVICE_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);

	memset(&args, 0, sizeof(args));
	args.ucLinkClock = dp_clock / 10;
	args.ucConfig = ucconfig;
	args.ucAction = action;
	args.ucLaneNum = lane_num;
	args.ucStatus = 0;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return args.ucStatus;
}

u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
{
	struct drm_device *dev = amdgpu_connector->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
					   amdgpu_connector->ddc_bus->rec.i2c_id, 0);
}

static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	u8 buf[3];

	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}

static void amdgpu_atombios_dp_ds_ports(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	int ret;

	if (dig_connector->dpcd[DP_DPCD_REV] > 0x10) {
		ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux,
				       DP_DOWNSTREAM_PORT_0,
				       dig_connector->downstream_ports,
				       DP_MAX_DOWNSTREAM_PORTS);
		if (ret)
			memset(dig_connector->downstream_ports, 0,
			       DP_MAX_DOWNSTREAM_PORTS);
	}
}
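/*
 * Read and cache the sink's receiver capability (DPCD) block, then
 * probe the OUIs and, on DPCD revision 1.1+ sinks, the downstream
 * port info.
 */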
int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
	u8 msg[DP_DPCD_SIZE];
	int ret;

	ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
			       msg, DP_DPCD_SIZE);
	if (ret == DP_DPCD_SIZE) {
		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);

		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
			      dig_connector->dpcd);

		amdgpu_atombios_dp_probe_oui(amdgpu_connector);
		amdgpu_atombios_dp_ds_ports(amdgpu_connector);
		return 0;
	}

	dig_connector->dpcd[0] = 0;
	return -EINVAL;
}
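/*
 * Work out which panel mode the encoder should use: internal DP2 mode
 * when the sink advertises the eDP configuration capability, internal
 * DP1 mode for NUTMEG/TRAVIS bridges, and external DP mode otherwise.
 */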
int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
			       struct drm_connector *connector)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
	u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
	u8 tmp;

	if (!amdgpu_connector->con_priv)
		return panel_mode;

	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
		/* DP bridge chips */
		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
			else
				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
		}
	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		/* eDP */
		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
		}
	}

	return panel_mode;
}
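/*
 * Cache the lane count and link rate needed for this mode on the
 * connector so the encoder setup and link training code can use them.
 */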
void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
				 const struct drm_display_mode *mode)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;
	int ret;

	if (!amdgpu_connector->con_priv)
		return;
	dig_connector = amdgpu_connector->con_priv;

	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
		ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
							    mode->clock,
							    &dig_connector->dp_lane_count,
							    &dig_connector->dp_clock);
		if (ret) {
			dig_connector->dp_clock = 0;
			dig_connector->dp_lane_count = 0;
		}
	}
}
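/*
 * Mode validation helper: reject modes that no supported lane/rate
 * combination can carry, and 5.4 Gbps modes on hardware that is not
 * DP 1.2 capable.
 */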
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
				  struct drm_display_mode *mode)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;
	unsigned dp_lanes, dp_clock;
	int ret;

	if (!amdgpu_connector->con_priv)
		return MODE_CLOCK_HIGH;
	dig_connector = amdgpu_connector->con_priv;

	ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
						    mode->clock, &dp_lanes, &dp_clock);
	if (ret)
		return MODE_CLOCK_HIGH;

	if ((dp_clock == 540000) &&
	    (!amdgpu_connector_is_dp12_capable(connector)))
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
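/*
 * Re-train only when the link status can be read and channel
 * equalization is no longer OK on the active lanes.
 */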
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;

	if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
	    <= 0)
		return false;
	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
		return false;
	return true;
}

void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
				    u8 power_state)
{
	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
	struct amdgpu_connector_atom_dig *dig_connector;

	if (!amdgpu_connector->con_priv)
		return;

	dig_connector = amdgpu_connector->con_priv;

	/* power up/down the sink */
	if (dig_connector->dpcd[0] >= 0x11) {
		drm_dp_dpcd_writeb(&amdgpu_connector->ddc_bus->aux,
				   DP_SET_POWER, power_state);
		usleep_range(1000, 2000);
	}
}

struct amdgpu_atombios_dp_link_train_info {
	struct amdgpu_device *adev;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int dp_clock;
	int dp_lane_count;
	bool tp3_supported;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 train_set[4];
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 tries;
	struct drm_dp_aux *aux;
};

static void
amdgpu_atombios_dp_update_vs_emph(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	/* set the initial vs/emph on the source */
	amdgpu_atombios_encoder_setup_dig_transmitter(dp_info->encoder,
					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
					       0, dp_info->train_set[0]); /* sets all lanes at once */

	/* set the vs/emph on the sink */
	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
			  dp_info->train_set, dp_info->dp_lane_count);
}

static void
amdgpu_atombios_dp_set_tp(struct amdgpu_atombios_dp_link_train_info *dp_info, int tp)
{
	int rtp = 0;

	/* set training pattern on the source */
	switch (tp) {
	case DP_TRAINING_PATTERN_1:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
		break;
	case DP_TRAINING_PATTERN_2:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
		break;
	case DP_TRAINING_PATTERN_3:
		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
		break;
	}
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, rtp, 0);

	/* enable training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
}
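/*
 * Prepare source and sink for link training: power up the sink,
 * program downspread, panel mode, lane count and link rate, start
 * training on the encoder, and make sure no training pattern is
 * currently selected on the sink.
 */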
static int
amdgpu_atombios_dp_link_train_init(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(dp_info->encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u8 tmp;

	/* power up the sink */
	amdgpu_atombios_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);

	/* possibly enable downspread on the sink */
	if (dp_info->dpcd[3] & 0x1)
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
	else
		drm_dp_dpcd_writeb(dp_info->aux,
				   DP_DOWNSPREAD_CTRL, 0);

	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);

	/* set the lane count on the sink */
	tmp = dp_info->dp_lane_count;
	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);

	/* set the link rate on the sink */
	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);

	/* start training on the source */
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);

	/* disable the training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux,
			   DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return 0;
}

static int
amdgpu_atombios_dp_link_train_finish(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	udelay(400);

	/* disable the training pattern on the sink */
	drm_dp_dpcd_writeb(dp_info->aux,
			   DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);

	return 0;
}
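/*
 * Clock recovery phase (training pattern 1): repeatedly read the link
 * status and apply the drive settings the sink asks for, giving up
 * once the maximum voltage swing is hit on every lane or the same
 * voltage has been tried five times, as outlined in the DP link
 * training sequence.
 */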
static int
amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	bool clock_recovery;
	u8 voltage;
	int i;

	amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
	memset(dp_info->train_set, 0, 4);
	amdgpu_atombios_dp_update_vs_emph(dp_info);

	udelay(400);

	/* clock recovery loop */
	clock_recovery = false;
	dp_info->tries = 0;
	voltage = 0xff;
	while (1) {
		drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);

		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) <= 0) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dp_info->dp_lane_count; i++) {
			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dp_info->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++dp_info->tries;
			if (dp_info->tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			dp_info->tries = 0;

		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
					     dp_info->train_set);

		amdgpu_atombios_dp_update_vs_emph(dp_info);
	}
	if (!clock_recovery) {
		DRM_ERROR("clock recovery failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}
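/*
 * Channel equalization phase (training pattern 2, or 3 when TPS3 is
 * supported): keep applying the drive settings requested by the sink
 * for up to five tries until every active lane reports channel EQ.
 */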
static int
amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_info)
{
	bool channel_eq;

	if (dp_info->tp3_supported)
		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
	else
		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);

	/* channel equalization loop */
	dp_info->tries = 0;
	channel_eq = false;
	while (1) {
		drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);

		if (drm_dp_dpcd_read_link_status(dp_info->aux,
						 dp_info->link_status) <= 0) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (dp_info->tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
					     dp_info->train_set);

		amdgpu_atombios_dp_update_vs_emph(dp_info);
		dp_info->tries++;
	}

	if (!channel_eq) {
		DRM_ERROR("channel eq failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}
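/*
 * Full link-training entry point: gather the cached DPCD and link
 * parameters for the connector, then run the init, clock recovery and
 * channel equalization steps, always finishing by clearing the
 * training pattern.
 */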
void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
			    struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_connector *amdgpu_connector;
	struct amdgpu_connector_atom_dig *dig_connector;
	struct amdgpu_atombios_dp_link_train_info dp_info;
	u8 tmp;

	if (!amdgpu_encoder->enc_priv)
		return;

	amdgpu_connector = to_amdgpu_connector(connector);
	if (!amdgpu_connector->con_priv)
		return;
	dig_connector = amdgpu_connector->con_priv;

	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
		return;

	if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
	    == 1) {
		if (tmp & DP_TPS3_SUPPORTED)
			dp_info.tp3_supported = true;
		else
			dp_info.tp3_supported = false;
	} else {
		dp_info.tp3_supported = false;
	}

	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
	dp_info.adev = adev;
	dp_info.encoder = encoder;
	dp_info.connector = connector;
	dp_info.dp_lane_count = dig_connector->dp_lane_count;
	dp_info.dp_clock = dig_connector->dp_clock;
	dp_info.aux = &amdgpu_connector->ddc_bus->aux;

	if (amdgpu_atombios_dp_link_train_init(&dp_info))
		goto done;
	if (amdgpu_atombios_dp_link_train_cr(&dp_info))
		goto done;
	if (amdgpu_atombios_dp_link_train_ce(&dp_info))
		goto done;
done:
	if (amdgpu_atombios_dp_link_train_finish(&dp_info))
		return;
}