// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra CEC implementation
 *
 * The original 3.10 CEC driver using a custom API:
 *
 * Copyright (c) 2012-2015, NVIDIA CORPORATION.  All rights reserved.
 *
 * Conversion to the CEC framework and to the mainline kernel:
 *
 * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/clk/tegra.h>

#include <media/cec-notifier.h>

#include "tegra_cec.h"

#define TEGRA_CEC_NAME "tegra-cec"

struct tegra_cec {
	struct cec_adapter	*adap;
	struct device		*dev;
	struct clk		*clk;
	void __iomem		*cec_base;
	struct cec_notifier	*notifier;
	int			tegra_cec_irq;
	bool			rx_done;
	bool			tx_done;
	int			tx_status;
	u8			rx_buf[CEC_MAX_MSG_SIZE];
	u8			rx_buf_cnt;
	u32			tx_buf[CEC_MAX_MSG_SIZE];
	u8			tx_buf_cur;
	u8			tx_buf_cnt;
};

static inline u32 cec_read(struct tegra_cec *cec, u32 reg)
{
	return readl(cec->cec_base + reg);
}

static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val)
{
	writel(val, cec->cec_base + reg);
}

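/*
 * Briefly disable the controller, clear all latched interrupt status and
 * then restore the previous hardware control settings.
 */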
static void tegra_cec_error_recovery(struct tegra_cec *cec)
{
	u32 hw_ctrl;

	hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL);
	cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
	cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
	cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl);
}

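/*
 * Threaded half of the interrupt: report transmit completion and hand any
 * fully received message to the CEC core.
 */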
static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data)
{
	struct device *dev = data;
	struct tegra_cec *cec = dev_get_drvdata(dev);

	if (cec->tx_done) {
		cec_transmit_attempt_done(cec->adap, cec->tx_status);
		cec->tx_done = false;
	}
	if (cec->rx_done) {
		struct cec_msg msg = {};

		msg.len = cec->rx_buf_cnt;
		memcpy(msg.msg, cec->rx_buf, msg.len);
		cec_received_msg(cec->adap, &msg);
		cec->rx_done = false;
		cec->rx_buf_cnt = 0;
	}
	return IRQ_HANDLED;
}

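/*
 * Hard interrupt handler: handle transmit error conditions, feed the TX
 * register one byte at a time, collect received bytes, and wake the threaded
 * handler once a transmit attempt finishes or a complete message arrives.
 */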
static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct tegra_cec *cec = dev_get_drvdata(dev);
	u32 status, mask;

	status = cec_read(cec, TEGRA_CEC_INT_STAT);
	mask = cec_read(cec, TEGRA_CEC_INT_MASK);

	status &= mask;

	if (!status)
		return IRQ_HANDLED;

	if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) {
		dev_err(dev, "TX underrun, interrupt timing issue!\n");

		tegra_cec_error_recovery(cec);
		cec_write(cec, TEGRA_CEC_INT_MASK,
			  mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);

		cec->tx_done = true;
		cec->tx_status = CEC_TX_STATUS_ERROR;
		return IRQ_WAKE_THREAD;
	}

	if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) ||
	    (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) {
		tegra_cec_error_recovery(cec);
		cec_write(cec, TEGRA_CEC_INT_MASK,
			  mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);

		cec->tx_done = true;
		if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)
			cec->tx_status = CEC_TX_STATUS_LOW_DRIVE;
		else
			cec->tx_status = CEC_TX_STATUS_ARB_LOST;
		return IRQ_WAKE_THREAD;
	}

	if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) {
		cec_write(cec, TEGRA_CEC_INT_STAT,
			  TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED);

		if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) {
			tegra_cec_error_recovery(cec);

			cec->tx_done = true;
			cec->tx_status = CEC_TX_STATUS_NACK;
		} else {
			cec->tx_done = true;
			cec->tx_status = CEC_TX_STATUS_OK;
		}
		return IRQ_WAKE_THREAD;
	}

	if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD)
		dev_warn(dev, "TX NAKed on the fly!\n");

	if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) {
		if (cec->tx_buf_cur == cec->tx_buf_cnt) {
			cec_write(cec, TEGRA_CEC_INT_MASK,
				  mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
		} else {
			cec_write(cec, TEGRA_CEC_TX_REGISTER,
				  cec->tx_buf[cec->tx_buf_cur++]);
			cec_write(cec, TEGRA_CEC_INT_STAT,
				  TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY);
		}
	}

	if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {
		cec_write(cec, TEGRA_CEC_INT_STAT,
			  TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED);
		cec->rx_done = false;
		cec->rx_buf_cnt = 0;
	}
	if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
		u32 v;

		cec_write(cec, TEGRA_CEC_INT_STAT,
			  TEGRA_CEC_INT_STAT_RX_REGISTER_FULL);
		v = cec_read(cec, TEGRA_CEC_RX_REGISTER);
		if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE)
			cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff;
		if (v & TEGRA_CEC_RX_REGISTER_EOM) {
			cec->rx_done = true;
			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}

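/*
 * Reset the controller; when enabling, program the input filter, the RX/TX
 * bit timing and the interrupt mask, then switch the block into combined
 * TX/RX mode.
 */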
static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
	struct tegra_cec *cec = adap->priv;

	cec->rx_buf_cnt = 0;
	cec->tx_buf_cnt = 0;
	cec->tx_buf_cur = 0;

	cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
	cec_write(cec, TEGRA_CEC_INT_MASK, 0);
	cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
	cec_write(cec, TEGRA_CEC_SW_CONTROL, 0);

	if (!enable)
		return 0;

	cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20);

	cec_write(cec, TEGRA_CEC_RX_TIMING_0,
		  (0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) |
		  (0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) |
		  (0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) |
		  (0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT));

	cec_write(cec, TEGRA_CEC_RX_TIMING_1,
		  (0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) |
		  (0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) |
		  (0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) |
		  (0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT));

	cec_write(cec, TEGRA_CEC_RX_TIMING_2,
		  (0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT));

	cec_write(cec, TEGRA_CEC_TX_TIMING_0,
		  (0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) |
		  (0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) |
		  (0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) |
		  (0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT));

	cec_write(cec, TEGRA_CEC_TX_TIMING_1,
		  (0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) |
		  (0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) |
		  (0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) |
		  (0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT));

	cec_write(cec, TEGRA_CEC_TX_TIMING_2,
		  (0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) |
		  (0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) |
		  (0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT));

	cec_write(cec, TEGRA_CEC_INT_MASK,
		  TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN |
		  TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD |
		  TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED |
		  TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
		  TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
		  TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
		  TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);

	cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
	return 0;
}

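/*
 * The hardware matches incoming destination addresses against a bitmask of
 * claimed logical addresses kept in the HW_CONTROL register.
 */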
static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
	struct tegra_cec *cec = adap->priv;
	u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL);

	if (logical_addr == CEC_LOG_ADDR_INVALID)
		state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK;
	else
		state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr));

	cec_write(cec, TEGRA_CEC_HW_CONTROL, state);
	return 0;
}

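/*
 * Monitor-all mode maps to the hardware RX snoop bit, which lets the
 * controller receive traffic addressed to any destination.
 */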
static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap,
					     bool enable)
{
	struct tegra_cec *cec = adap->priv;
	u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL);

	if (enable)
		reg |= TEGRA_CEC_HWCTRL_RX_SNOOP;
	else
		reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP;
	cec_write(cec, TEGRA_CEC_HW_CONTROL, reg);
	return 0;
}

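/*
 * Queue a message for transmission: each byte is stored together with its
 * start/EOM/broadcast flags, and the actual transfer is then driven byte by
 * byte from the TX_REGISTER_EMPTY interrupt in the hard IRQ handler.
 */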
static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
				   u32 signal_free_time_ms, struct cec_msg *msg)
{
	bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY;
	struct tegra_cec *cec = adap->priv;
	unsigned int i;
	u32 mode = 0;
	u32 mask;

	if (cec_msg_is_broadcast(msg))
		mode = TEGRA_CEC_TX_REG_BCAST;

	cec->tx_buf_cur = 0;
	cec->tx_buf_cnt = msg->len;

	for (i = 0; i < msg->len; i++) {
		cec->tx_buf[i] = mode | msg->msg[i];
		if (i == 0)
			cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT;
		if (i == msg->len - 1)
			cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM;
		if (i == 0 && retry_xfer)
			cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY;
	}

	mask = cec_read(cec, TEGRA_CEC_INT_MASK);
	cec_write(cec, TEGRA_CEC_INT_MASK,
		  mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);

	return 0;
}

static const struct cec_adap_ops tegra_cec_ops = {
	.adap_enable = tegra_cec_adap_enable,
	.adap_log_addr = tegra_cec_adap_log_addr,
	.adap_transmit = tegra_cec_adap_transmit,
	.adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable,
};

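/*
 * Probe: look up the HDMI device for the CEC notifier, map the registers,
 * enable the CEC clock, request the interrupt and register the adapter.
 */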
static int tegra_cec_probe(struct platform_device *pdev)
{
	struct device *hdmi_dev;
	struct tegra_cec *cec;
	struct resource *res;
	int ret = 0;

	hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);

	if (IS_ERR(hdmi_dev))
		return PTR_ERR(hdmi_dev);

	cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);

	if (!cec)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get memory resource for device\n");
		return -EBUSY;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
		pdev->name)) {
		dev_err(&pdev->dev,
			"Unable to request mem region for device\n");
		return -EBUSY;
	}

	cec->tegra_cec_irq = platform_get_irq(pdev, 0);

	if (cec->tegra_cec_irq < 0)
		return cec->tegra_cec_irq;

	cec->cec_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));

	if (!cec->cec_base) {
		dev_err(&pdev->dev, "Unable to grab IOs for device\n");
		return -EBUSY;
	}

	cec->clk = devm_clk_get(&pdev->dev, "cec");

	if (IS_ERR_OR_NULL(cec->clk)) {
		dev_err(&pdev->dev, "Can't get clock for CEC\n");
		return -ENOENT;
	}

	ret = clk_prepare_enable(cec->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
		return ret;
	}

	/* Set up the driver context used by the interrupt handlers. */
	cec->dev = &pdev->dev;

	platform_set_drvdata(pdev, cec);

	ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq,
		tegra_cec_irq_handler, tegra_cec_irq_thread_handler,
		0, "cec_irq", &pdev->dev);

	if (ret) {
		dev_err(&pdev->dev,
			"Unable to request interrupt for device\n");
		goto err_clk;
	}

	cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
			CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL |
			CEC_CAP_CONNECTOR_INFO,
			CEC_MAX_LOG_ADDRS);
	if (IS_ERR(cec->adap)) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Couldn't create cec adapter\n");
		goto err_clk;
	}

	cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
						       cec->adap);
	if (!cec->notifier) {
		ret = -ENOMEM;
		goto err_adapter;
	}

	ret = cec_register_adapter(cec->adap, &pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't register device\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_adapter:
	cec_delete_adapter(cec->adap);
err_clk:
	clk_disable_unprepare(cec->clk);
	return ret;
}

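/* Stop the CEC clock and unregister the notifier and adapter. */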
static int tegra_cec_remove(struct platform_device *pdev)
{
	struct tegra_cec *cec = platform_get_drvdata(pdev);

	clk_disable_unprepare(cec->clk);

	cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
	cec_unregister_adapter(cec->adap);

	return 0;
}

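/*
 * Legacy platform PM hooks: the CEC clock is gated while suspended and
 * re-enabled on resume.
 */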
#ifdef CONFIG_PM
static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct tegra_cec *cec = platform_get_drvdata(pdev);

	clk_disable_unprepare(cec->clk);

	dev_notice(&pdev->dev, "suspended\n");
	return 0;
}

static int tegra_cec_resume(struct platform_device *pdev)
{
	struct tegra_cec *cec = platform_get_drvdata(pdev);

	dev_notice(&pdev->dev, "Resuming\n");

	return clk_prepare_enable(cec->clk);
}
#endif

static const struct of_device_id tegra_cec_of_match[] = {
	{ .compatible = "nvidia,tegra114-cec", },
	{ .compatible = "nvidia,tegra124-cec", },
	{ .compatible = "nvidia,tegra210-cec", },
	{},
};

static struct platform_driver tegra_cec_driver = {
	.driver = {
		.name = TEGRA_CEC_NAME,
		.of_match_table = of_match_ptr(tegra_cec_of_match),
	},
	.probe = tegra_cec_probe,
	.remove = tegra_cec_remove,

#ifdef CONFIG_PM
	.suspend = tegra_cec_suspend,
	.resume = tegra_cec_resume,
#endif
};

module_platform_driver(tegra_cec_driver);

MODULE_DESCRIPTION("Tegra HDMI CEC driver");
MODULE_AUTHOR("NVIDIA CORPORATION");
MODULE_AUTHOR("Cisco Systems, Inc. and/or its affiliates");
MODULE_LICENSE("GPL v2");