// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Nokia Corporation
 *
 *  Based on lirc_serial.c
 */
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/hrtimer.h>

#include <media/rc-core.h>

#define WBUF_LEN 256

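/* Per-device state; a single statically allocated instance is used below. */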
struct ir_rx51 {
	struct rc_dev *rcdev;
	struct pwm_device *pwm;
	struct hrtimer timer;
	struct device *dev;
	wait_queue_head_t wqueue;

	unsigned int	freq;		/* carrier frequency */
	unsigned int	duty_cycle;	/* carrier duty cycle */
	int		wbuf[WBUF_LEN];
	int		wbuf_index;
	unsigned long	device_is_open;
};

static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
	pwm_enable(ir_rx51->pwm);
}

static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
	pwm_disable(ir_rx51->pwm);
}

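/*
 * Program the PWM period and duty cycle (in nanoseconds) that correspond
 * to the requested carrier frequency and duty-cycle percentage.
 */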
static int init_timing_params(struct ir_rx51 *ir_rx51)
{
	struct pwm_device *pwm = ir_rx51->pwm;
	int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);

	duty = DIV_ROUND_CLOSEST(ir_rx51->duty_cycle * period, 100);

	pwm_config(pwm, duty, period);

	return 0;
}

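/*
 * hrtimer callback driving the transmit: toggle the carrier on for pulses
 * (even wbuf indices) and off for spaces (odd indices), advancing the timer
 * by each entry's duration until the buffer ends, then wake up the sender.
 */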
static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
	struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
	ktime_t now;

	if (ir_rx51->wbuf_index < 0) {
		dev_err_ratelimited(ir_rx51->dev,
				    "BUG wbuf_index has value of %i\n",
				    ir_rx51->wbuf_index);
		goto end;
	}

	/*
	 * If we happen to hit an odd latency spike, loop through the
	 * pulses until we catch up.
	 */
	do {
		u64 ns;

		if (ir_rx51->wbuf_index >= WBUF_LEN)
			goto end;
		if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
			goto end;

		if (ir_rx51->wbuf_index % 2)
			ir_rx51_off(ir_rx51);
		else
			ir_rx51_on(ir_rx51);

		ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
		hrtimer_add_expires_ns(timer, ns);

		ir_rx51->wbuf_index++;

		now = timer->base->get_time();

	} while (hrtimer_get_expires_tv64(timer) < now);

	return HRTIMER_RESTART;
end:
	/* Stop TX here */
	ir_rx51_off(ir_rx51);
	ir_rx51->wbuf_index = -1;

	wake_up_interruptible(&ir_rx51->wqueue);

	return HRTIMER_NORESTART;
}

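/*
 * tx_ir callback: copy the pulse/space buffer (in microseconds), wait for
 * any transfer already in flight, then start the carrier and the hrtimer
 * and block until the whole buffer has been sent.
 */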
static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
		      unsigned int count)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (count > WBUF_LEN)
		return -EINVAL;

	memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));

	/* Wait for any pending transfers to finish */
	wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

	init_timing_params(ir_rx51);
	if (count < WBUF_LEN)
		ir_rx51->wbuf[count] = -1; /* Insert termination mark */

	/*
	 * REVISIT: Adjust latency requirements with pm_qos_add_request()
	 * so the device does not enter too deep a sleep state.
	 */

	ir_rx51_on(ir_rx51);
	ir_rx51->wbuf_index = 1;
	hrtimer_start(&ir_rx51->timer,
		      ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
		      HRTIMER_MODE_REL);
	/*
	 * Don't return to userspace until the transfer has finished.
	 */
	wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

	/* REVISIT: Remove the pm_qos constraint; we can sleep again */

	return count;
}

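/* Claim the PWM channel on open; only one concurrent user is allowed. */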
static int ir_rx51_open(struct rc_dev *dev)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (test_and_set_bit(1, &ir_rx51->device_is_open))
		return -EBUSY;

	ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
	if (IS_ERR(ir_rx51->pwm)) {
		int res = PTR_ERR(ir_rx51->pwm);

		dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
		/* Allow a later open attempt to retry */
		clear_bit(1, &ir_rx51->device_is_open);
		return res;
	}

	return 0;
}

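/* Stop any transmission in progress and release the PWM channel on close. */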
static void ir_rx51_release(struct rc_dev *dev)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	hrtimer_cancel(&ir_rx51->timer);
	ir_rx51_off(ir_rx51);
	pwm_put(ir_rx51->pwm);

	clear_bit(1, &ir_rx51->device_is_open);
}

static struct ir_rx51 ir_rx51 = {
	.duty_cycle	= 50,
	.wbuf_index	= -1,
};

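/* s_tx_duty_cycle callback: duty is a percentage, applied at the next TX. */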
static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	ir_rx51->duty_cycle = duty;

	return 0;
}

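/* s_tx_carrier callback: accept carriers between 20 kHz and 500 kHz. */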
static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (carrier > 500000 || carrier < 20000)
		return -EINVAL;

	ir_rx51->freq = carrier;

	return 0;
}

#ifdef CONFIG_PM

static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
	/*
	 * In case the device is still open, do not suspend. Normally
	 * this should not be a problem, as lircd keeps the device open
	 * only for short periods of time. We also don't want to get
	 * involved with race conditions that might happen if we were
	 * in the middle of a transmit. Thus, we defer any suspend
	 * actions until the transmit has completed.
	 */
	if (test_and_set_bit(1, &ir_rx51.device_is_open))
		return -EAGAIN;

	clear_bit(1, &ir_rx51.device_is_open);

	return 0;
}

static int ir_rx51_resume(struct platform_device *dev)
{
	return 0;
}

#else

#define ir_rx51_suspend	NULL
#define ir_rx51_resume	NULL

#endif /* CONFIG_PM */

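/*
 * Probe: read the default carrier from the PWM period, set up the hrtimer
 * and register the raw IR TX device with rc-core.
 */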
static int ir_rx51_probe(struct platform_device *dev)
{
	struct pwm_device *pwm;
	struct rc_dev *rcdev;

	pwm = pwm_get(&dev->dev, NULL);
	if (IS_ERR(pwm)) {
		int err = PTR_ERR(pwm);

		if (err != -EPROBE_DEFER)
			dev_err(&dev->dev, "pwm_get failed: %d\n", err);
		return err;
	}

	/* Default carrier = NSEC_PER_SEC / period, if userspace sets none */
	ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(NSEC_PER_SEC, pwm_get_period(pwm));
	pwm_put(pwm);

	hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ir_rx51.timer.function = ir_rx51_timer_cb;

	ir_rx51.dev = &dev->dev;

	rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
	if (!rcdev)
		return -ENOMEM;

	rcdev->priv = &ir_rx51;
	rcdev->open = ir_rx51_open;
	rcdev->close = ir_rx51_release;
	rcdev->tx_ir = ir_rx51_tx;
	rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
	rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
	rcdev->driver_name = KBUILD_MODNAME;

	ir_rx51.rcdev = rcdev;

	return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
}

static int ir_rx51_remove(struct platform_device *dev)
{
	return 0;
}

static const struct of_device_id ir_rx51_match[] = {
	{
		.compatible = "nokia,n900-ir",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ir_rx51_match);

static struct platform_driver ir_rx51_platform_driver = {
	.probe		= ir_rx51_probe,
	.remove		= ir_rx51_remove,
	.suspend	= ir_rx51_suspend,
	.resume		= ir_rx51_resume,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = of_match_ptr(ir_rx51_match),
	},
};
module_platform_driver(ir_rx51_platform_driver);

MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");