xref: /linux/drivers/media/rc/rc-ir-raw.c (revision b17ec78a)
1 /* rc-ir-raw.c - handle IR pulse/space events
2  *
3  * Copyright (C) 2010 by Mauro Carvalho Chehab
4  *
5  * This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation version 2 of the License.
8  *
9  *  This program is distributed in the hope that it will be useful,
10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  *  GNU General Public License for more details.
13  */
14 
15 #include <linux/export.h>
16 #include <linux/kthread.h>
17 #include <linux/mutex.h>
18 #include <linux/kmod.h>
19 #include <linux/sched.h>
20 #include "rc-core-priv.h"
21 
22 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
23 static LIST_HEAD(ir_raw_client_list);
24 
25 /* Used to handle IR raw handler extensions */
26 static DEFINE_MUTEX(ir_raw_handler_lock);
27 static LIST_HEAD(ir_raw_handler_list);
28 static atomic64_t available_protocols = ATOMIC64_INIT(0);
29 
30 static int ir_raw_event_thread(void *data)
31 {
32 	struct ir_raw_event ev;
33 	struct ir_raw_handler *handler;
34 	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
35 
36 	while (1) {
37 		mutex_lock(&ir_raw_handler_lock);
38 		while (kfifo_out(&raw->kfifo, &ev, 1)) {
39 			list_for_each_entry(handler, &ir_raw_handler_list, list)
40 				if (raw->dev->enabled_protocols &
41 				    handler->protocols || !handler->protocols)
42 					handler->decode(raw->dev, ev);
43 			raw->prev_ev = ev;
44 		}
45 		mutex_unlock(&ir_raw_handler_lock);
46 
47 		set_current_state(TASK_INTERRUPTIBLE);
48 
49 		if (kthread_should_stop()) {
50 			__set_current_state(TASK_RUNNING);
51 			break;
52 		} else if (!kfifo_is_empty(&raw->kfifo))
53 			set_current_state(TASK_RUNNING);
54 
55 		schedule();
56 	}
57 
58 	return 0;
59 }
60 
61 /**
62  * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
63  * @dev:	the struct rc_dev device descriptor
64  * @ev:		the struct ir_raw_event descriptor of the pulse/space
65  *
66  * This routine (which may be called from an interrupt context) stores a
67  * pulse/space duration for the raw ir decoding state machines. The sample
68  * is described by @ev: its duration in nanoseconds and whether it is a
69  * pulse or a space (@ev->pulse); setting @ev->reset resets the decoders.
70  */
71 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
72 {
73 	if (!dev->raw)
74 		return -EINVAL;
75 
76 	IR_dprintk(2, "sample: (%05dus %s)\n",
77 		   TO_US(ev->duration), TO_STR(ev->pulse));
78 
79 	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
80 		dev_err(&dev->dev, "IR event FIFO is full!\n");
81 		return -ENOSPC;
82 	}
83 
84 	return 0;
85 }
86 EXPORT_SYMBOL_GPL(ir_raw_event_store);
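/*
 * Illustrative sketch (not part of this file): the typical way a receiver
 * driver feeds ir_raw_event_store() with samples it has already measured.
 * The helper name and its callers are hypothetical; the duration is in
 * nanoseconds, matching the ktime_to_ns() accounting used in this file.
 */
static void example_store_sample(struct rc_dev *dev, u32 duration_ns,
				 bool pulse)
{
	DEFINE_IR_RAW_EVENT(ev);

	ev.duration = duration_ns;	/* length of the mark or gap, in ns */
	ev.pulse = pulse;		/* true = pulse (mark), false = space */

	ir_raw_event_store(dev, &ev);	/* safe from interrupt context */
	ir_raw_event_handle(dev);	/* wake the per-device decoder thread */
}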
87 
88 /**
89  * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
90  * @dev:	the struct rc_dev device descriptor
91  * @pulse:	true for pulse, false for space
92  *
93  * This routine (which may be called from an interrupt context) is used to
94  * store the beginning of an ir pulse or space (or the start/end of ir
95  * reception) for the raw ir decoding state machines. This is used by
96  * hardware which does not provide durations directly but only interrupts
97  * (or similar events) on state change.
98  */
99 int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
100 {
101 	ktime_t			now;
102 	DEFINE_IR_RAW_EVENT(ev);
103 	int			rc = 0;
104 
105 	if (!dev->raw)
106 		return -EINVAL;
107 
108 	now = ktime_get();
109 	ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
110 	ev.pulse = !pulse;
111 
112 	rc = ir_raw_event_store(dev, &ev);
113 
114 	dev->raw->last_event = now;
115 
116 	/* the timer may already be armed with the full timeout (125ms by default) */
117 	if (!timer_pending(&dev->raw->edge_handle) ||
118 	    time_after(dev->raw->edge_handle.expires,
119 		       jiffies + msecs_to_jiffies(15))) {
120 		mod_timer(&dev->raw->edge_handle,
121 			  jiffies + msecs_to_jiffies(15));
122 	}
123 
124 	return rc;
125 }
126 EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
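/*
 * Illustrative sketch (not part of this file): hardware that only signals
 * level changes (e.g. a GPIO wired to an IR demodulator) reports each edge
 * and lets rc-core compute the durations. example_read_line() is a
 * hypothetical helper returning the level the line has just switched to.
 */
static void example_edge_interrupt(struct rc_dev *dev)
{
	bool pulse = example_read_line(dev);

	/*
	 * Report the state we have just entered; rc-core timestamps the
	 * edge and stores the duration of the state that just ended
	 * (hence the "ev.pulse = !pulse" inversion above), then arms a
	 * short timer so the last space is eventually flushed as well.
	 */
	ir_raw_event_store_edge(dev, pulse);
}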
127 
128 /**
129  * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
130  * @dev:	the struct rc_dev device descriptor
131  * @ev:		the struct ir_raw_event descriptor of the pulse/space
132  *
133  * This routine (which may be called from an interrupt context) works
134  * in a similar manner to ir_raw_event_store_edge().
135  * This routine is intended for devices with limited internal buffers.
136  * It automerges consecutive samples of the same type and handles timeouts.
137  * Returns non-zero if the event was added, and zero if the event was
138  * ignored due to idle processing.
139  */
140 int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
141 {
142 	if (!dev->raw)
143 		return -EINVAL;
144 
145 	/* Ignore spaces in idle mode */
146 	if (dev->idle && !ev->pulse)
147 		return 0;
148 	else if (dev->idle)
149 		ir_raw_event_set_idle(dev, false);
150 
151 	if (!dev->raw->this_ev.duration)
152 		dev->raw->this_ev = *ev;
153 	else if (ev->pulse == dev->raw->this_ev.pulse)
154 		dev->raw->this_ev.duration += ev->duration;
155 	else {
156 		ir_raw_event_store(dev, &dev->raw->this_ev);
157 		dev->raw->this_ev = *ev;
158 	}
159 
160 	/* Enter idle mode if necessary */
161 	if (!ev->pulse && dev->timeout &&
162 	    dev->raw->this_ev.duration >= dev->timeout)
163 		ir_raw_event_set_idle(dev, true);
164 
165 	return 1;
166 }
167 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
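/*
 * Illustrative sketch (not part of this file): a device with a small
 * internal sample FIFO drained on interrupt. example_hw_fifo_pop() is a
 * hypothetical helper; merging of same-type samples and the automatic
 * switch to idle mode are done by ir_raw_event_store_with_filter() itself.
 */
static void example_drain_hw_fifo(struct rc_dev *dev)
{
	DEFINE_IR_RAW_EVENT(ev);
	u32 duration_ns;
	bool pulse;

	while (example_hw_fifo_pop(dev, &duration_ns, &pulse)) {
		ev.duration = duration_ns;
		ev.pulse = pulse;
		ir_raw_event_store_with_filter(dev, &ev);
	}

	ir_raw_event_handle(dev);
}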
168 
169 /**
170  * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
171  * @dev:	the struct rc_dev device descriptor
172  * @idle:	whether the device is idle or not
173  */
174 void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
175 {
176 	if (!dev->raw)
177 		return;
178 
179 	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
180 
181 	if (idle) {
182 		dev->raw->this_ev.timeout = true;
183 		ir_raw_event_store(dev, &dev->raw->this_ev);
184 		init_ir_raw_event(&dev->raw->this_ev);
185 	}
186 
187 	if (dev->s_idle)
188 		dev->s_idle(dev, idle);
189 
190 	dev->idle = idle;
191 }
192 EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
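/*
 * Illustrative sketch (not part of this file): a driver may install a
 * dev->s_idle callback so its sampling engine can be stopped while rc-core
 * considers the receiver idle and restarted on the next activity.
 * example_hw_enable_sampling() is hypothetical.
 */
static void example_s_idle(struct rc_dev *dev, bool enable)
{
	example_hw_enable_sampling(dev, !enable);
}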
193 
194 /**
195  * ir_raw_event_handle() - schedules the decoding of stored ir data
196  * @dev:	the struct rc_dev device descriptor
197  *
198  * This routine will tell rc-core to start decoding stored ir data.
199  */
200 void ir_raw_event_handle(struct rc_dev *dev)
201 {
202 	if (!dev->raw || !dev->raw->thread)
203 		return;
204 
205 	wake_up_process(dev->raw->thread);
206 }
207 EXPORT_SYMBOL_GPL(ir_raw_event_handle);
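/*
 * Illustrative sketch (not part of this file): flushing a completed
 * reception. A timeout event tells the decoders the transmission is over
 * (compare edge_handle() below), and ir_raw_event_handle() wakes the rc%u
 * thread so decoding happens immediately instead of at the next sample.
 */
static void example_end_of_receive(struct rc_dev *dev)
{
	DEFINE_IR_RAW_EVENT(ev);

	ev.timeout = true;
	ev.duration = dev->timeout;	/* configured timeout, in ns */

	ir_raw_event_store(dev, &ev);
	ir_raw_event_handle(dev);
}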
208 
209 /* used internally by the sysfs interface */
210 u64
211 ir_raw_get_allowed_protocols(void)
212 {
213 	return atomic64_read(&available_protocols);
214 }
215 
216 static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
217 {
218 	/* the caller will update dev->enabled_protocols */
219 	return 0;
220 }
221 
222 static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
223 {
224 	mutex_lock(&dev->lock);
225 	dev->enabled_protocols &= ~protocols;
226 	mutex_unlock(&dev->lock);
227 }
228 
229 /**
230  * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
231  * @ev:		Pointer to pointer to next free event. *@ev is incremented for
232  *		each raw event filled.
233  * @max:	Maximum number of raw events to fill.
234  * @timings:	Manchester modulation timings.
235  * @n:		Number of bits of data.
236  * @data:	Data bits to encode.
237  *
238  * Encodes the @n least significant bits of @data using Manchester (bi-phase)
239  * modulation with the timing characteristics described by @timings, writing up
240  * to @max raw IR events using the *@ev pointer.
241  *
242  * Returns:	0 on success.
243  *		-ENOBUFS if there isn't enough space in the array to fit the
244  *		full encoded data. In this case all @max events will have been
245  *		written.
246  */
247 int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
248 			  const struct ir_raw_timings_manchester *timings,
249 			  unsigned int n, u64 data)
250 {
251 	bool need_pulse;
252 	u64 i;
253 	int ret = -ENOBUFS;
254 
255 	i = BIT_ULL(n - 1);
256 
257 	if (timings->leader) {
258 		if (!max--)
259 			return ret;
260 		if (timings->pulse_space_start) {
261 			init_ir_raw_event_duration((*ev)++, 1, timings->leader);
262 
263 			if (!max--)
264 				return ret;
265 			init_ir_raw_event_duration((*ev), 0, timings->leader);
266 		} else {
267 			init_ir_raw_event_duration((*ev), 1, timings->leader);
268 		}
269 		i >>= 1;
270 	} else {
271 		/* continue existing signal */
272 		--(*ev);
273 	}
274 	/* from here on *ev will point to the last event rather than the next */
275 
276 	while (n && i > 0) {
277 		need_pulse = !(data & i);
278 		if (timings->invert)
279 			need_pulse = !need_pulse;
280 		if (need_pulse == !!(*ev)->pulse) {
281 			(*ev)->duration += timings->clock;
282 		} else {
283 			if (!max--)
284 				goto nobufs;
285 			init_ir_raw_event_duration(++(*ev), need_pulse,
286 						   timings->clock);
287 		}
288 
289 		if (!max--)
290 			goto nobufs;
291 		init_ir_raw_event_duration(++(*ev), !need_pulse,
292 					   timings->clock);
293 		i >>= 1;
294 	}
295 
296 	if (timings->trailer_space) {
297 		if (!(*ev)->pulse)
298 			(*ev)->duration += timings->trailer_space;
299 		else if (!max--)
300 			goto nobufs;
301 		else
302 			init_ir_raw_event_duration(++(*ev), 0,
303 						   timings->trailer_space);
304 	}
305 
306 	ret = 0;
307 nobufs:
308 	/* point to the next event rather than last event before returning */
309 	++(*ev);
310 	return ret;
311 }
312 EXPORT_SYMBOL(ir_raw_gen_manchester);
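/*
 * Illustrative sketch (not part of this file): an encoder for a
 * hypothetical 14-bit bi-phase protocol, loosely modelled on RC-5.
 * The 888888ns unit, the bit count and the timings are illustrative only.
 */
static const struct ir_raw_timings_manchester example_manchester_timings = {
	.leader		= 888888,	/* leading pulse of one clock unit */
	.clock		= 888888,	/* half-bit (cell) duration, in ns */
	.trailer_space	= 888888 * 10,	/* minimum gap after the frame */
};

static int example_manchester_encode(u32 data, struct ir_raw_event *events,
				     unsigned int max)
{
	struct ir_raw_event *e = events;
	int ret;

	/* encode the 14 least significant bits of data */
	ret = ir_raw_gen_manchester(&e, max, &example_manchester_timings,
				    14, data);
	if (ret < 0)
		return ret;

	return e - events;	/* number of raw events written */
}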
313 
314 /**
315  * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
316  * @ev:		Pointer to pointer to next free event. *@ev is incremented for
317  *		each raw event filled.
318  * @max:	Maximum number of raw events to fill.
319  * @timings:	Pulse distance modulation timings.
320  * @n:		Number of bits of data.
321  * @data:	Data bits to encode.
322  *
323  * Encodes the @n least significant bits of @data using pulse-distance
324  * modulation with the timing characteristics described by @timings, writing up
325  * to @max raw IR events using the *@ev pointer.
326  *
327  * Returns:	0 on success.
328  *		-ENOBUFS if there isn't enough space in the array to fit the
329  *		full encoded data. In this case all @max events will have been
330  *		written.
331  */
332 int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
333 		  const struct ir_raw_timings_pd *timings,
334 		  unsigned int n, u64 data)
335 {
336 	int i;
337 	int ret;
338 	unsigned int space;
339 
340 	if (timings->header_pulse) {
341 		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
342 					     timings->header_space);
343 		if (ret)
344 			return ret;
345 	}
346 
347 	if (timings->msb_first) {
348 		for (i = n - 1; i >= 0; --i) {
349 			space = timings->bit_space[(data >> i) & 1];
350 			ret = ir_raw_gen_pulse_space(ev, &max,
351 						     timings->bit_pulse,
352 						     space);
353 			if (ret)
354 				return ret;
355 		}
356 	} else {
357 		for (i = 0; i < n; ++i, data >>= 1) {
358 			space = timings->bit_space[data & 1];
359 			ret = ir_raw_gen_pulse_space(ev, &max,
360 						     timings->bit_pulse,
361 						     space);
362 			if (ret)
363 				return ret;
364 		}
365 	}
366 
367 	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
368 				     timings->trailer_space);
369 	return ret;
370 }
371 EXPORT_SYMBOL(ir_raw_gen_pd);
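/*
 * Illustrative sketch (not part of this file): a pulse-distance encoder for
 * a hypothetical 32-bit protocol, loosely modelled on NEC. Every bit starts
 * with the same pulse and the value is carried by the length of the
 * following space; all values are in nanoseconds and illustrative only.
 */
static const struct ir_raw_timings_pd example_pd_timings = {
	.header_pulse	= 16 * 562500,	/* long AGC burst */
	.header_space	= 8 * 562500,
	.bit_pulse	= 562500,
	.bit_space[0]	= 562500,	/* '0': short space */
	.bit_space[1]	= 3 * 562500,	/* '1': long space */
	.trailer_pulse	= 562500,
	.trailer_space	= 10 * 562500,
	.msb_first	= 0,		/* least significant bit first */
};

static int example_pd_encode(u32 data, struct ir_raw_event *events,
			     unsigned int max)
{
	struct ir_raw_event *e = events;
	int ret;

	ret = ir_raw_gen_pd(&e, max, &example_pd_timings, 32, data);
	if (ret < 0)
		return ret;

	return e - events;	/* number of raw events written */
}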
372 
373 /**
374  * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
375  * @ev:		Pointer to pointer to next free event. *@ev is incremented for
376  *		each raw event filled.
377  * @max:	Maximum number of raw events to fill.
378  * @timings:	Pulse length modulation timings.
379  * @n:		Number of bits of data.
380  * @data:	Data bits to encode.
381  *
382  * Encodes the @n least significant bits of @data using pulse-length
383  * modulation with the timing characteristics described by @timings, writing up
384  * to @max raw IR events using the *@ev pointer.
385  *
386  * Returns:	0 on success.
387  *		-ENOBUFS if there isn't enough space in the array to fit the
388  *		full encoded data. In this case all @max events will have been
389  *		written.
390  */
391 int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
392 		  const struct ir_raw_timings_pl *timings,
393 		  unsigned int n, u64 data)
394 {
395 	int i;
396 	int ret = -ENOBUFS;
397 	unsigned int pulse;
398 
399 	if (!max--)
400 		return ret;
401 
402 	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);
403 
404 	if (timings->msb_first) {
405 		for (i = n - 1; i >= 0; --i) {
406 			if (!max--)
407 				return ret;
408 			init_ir_raw_event_duration((*ev)++, 0,
409 						   timings->bit_space);
410 			if (!max--)
411 				return ret;
412 			pulse = timings->bit_pulse[(data >> i) & 1];
413 			init_ir_raw_event_duration((*ev)++, 1, pulse);
414 		}
415 	} else {
416 		for (i = 0; i < n; ++i, data >>= 1) {
417 			if (!max--)
418 				return ret;
419 			init_ir_raw_event_duration((*ev)++, 0,
420 						   timings->bit_space);
421 			if (!max--)
422 				return ret;
423 			pulse = timings->bit_pulse[data & 1];
424 			init_ir_raw_event_duration((*ev)++, 1, pulse);
425 		}
426 	}
427 
428 	if (!max--)
429 		return ret;
430 
431 	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);
432 
433 	return 0;
434 }
435 EXPORT_SYMBOL(ir_raw_gen_pl);
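/*
 * Illustrative sketch (not part of this file): a pulse-length encoder for a
 * hypothetical 20-bit protocol, loosely modelled on Sony SIRC. The space
 * before each bit is fixed and the value is carried by the pulse length;
 * all values are in nanoseconds and illustrative only.
 */
static const struct ir_raw_timings_pl example_pl_timings = {
	.header_pulse	= 4 * 600000,	/* start burst */
	.bit_space	= 600000,	/* fixed gap before every bit */
	.bit_pulse[0]	= 600000,	/* '0': short pulse */
	.bit_pulse[1]	= 2 * 600000,	/* '1': long pulse */
	.trailer_space	= 10 * 600000,	/* frame gap */
	.msb_first	= 0,
};

static int example_pl_encode(u32 data, struct ir_raw_event *events,
			     unsigned int max)
{
	struct ir_raw_event *e = events;
	int ret;

	ret = ir_raw_gen_pl(&e, max, &example_pl_timings, 20, data);
	if (ret < 0)
		return ret;

	return e - events;	/* number of raw events written */
}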
436 
437 /**
438  * ir_raw_encode_scancode() - Encode a scancode as raw events
439  *
440  * @protocol:		protocol
441  * @scancode:		scancode filter describing a single scancode
442  * @events:		array of raw events to write into
443  * @max:		max number of raw events
444  *
445  * Attempts to encode the scancode as raw events.
446  *
447  * Returns:	The number of events written.
448  *		-ENOBUFS if there isn't enough space in the array to fit the
449  *		encoding. In this case all @max events will have been written.
450  *		-EINVAL if the scancode is ambiguous or invalid, or if no
451  *		compatible encoder was found.
452  */
453 int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
454 			   struct ir_raw_event *events, unsigned int max)
455 {
456 	struct ir_raw_handler *handler;
457 	int ret = -EINVAL;
458 	u64 mask = 1ULL << protocol;
459 
460 	mutex_lock(&ir_raw_handler_lock);
461 	list_for_each_entry(handler, &ir_raw_handler_list, list) {
462 		if (handler->protocols & mask && handler->encode) {
463 			ret = handler->encode(protocol, scancode, events, max);
464 			if (ret >= 0 || ret == -ENOBUFS)
465 				break;
466 		}
467 	}
468 	mutex_unlock(&ir_raw_handler_lock);
469 
470 	return ret;
471 }
472 EXPORT_SYMBOL(ir_raw_encode_scancode);
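/*
 * Illustrative sketch (not part of this file): building the raw pattern for
 * one scancode, e.g. to program a hardware wakeup comparator. The array
 * size and example_hw_load_pattern() are hypothetical.
 */
static int example_build_wakeup_pattern(struct rc_dev *dev,
					enum rc_proto proto, u32 scancode)
{
	struct ir_raw_event events[64];
	int count;

	count = ir_raw_encode_scancode(proto, scancode, events,
				       ARRAY_SIZE(events));
	if (count < 0)
		return count;	/* -EINVAL or -ENOBUFS */

	/* events[0..count-1] now alternate pulse/space durations in ns */
	return example_hw_load_pattern(dev, events, count);
}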
473 
474 static void edge_handle(struct timer_list *t)
475 {
476 	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
477 	struct rc_dev *dev = raw->dev;
478 	ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event);
479 
480 	if (ktime_to_ns(interval) >= dev->timeout) {
481 		DEFINE_IR_RAW_EVENT(ev);
482 
483 		ev.timeout = true;
484 		ev.duration = ktime_to_ns(interval);
485 
486 		ir_raw_event_store(dev, &ev);
487 	} else {
488 		mod_timer(&dev->raw->edge_handle,
489 			  jiffies + nsecs_to_jiffies(dev->timeout -
490 						     ktime_to_ns(interval)));
491 	}
492 
493 	ir_raw_event_handle(dev);
494 }
495 
496 /*
497  * Used to (un)register raw event clients
498  */
499 int ir_raw_event_prepare(struct rc_dev *dev)
500 {
501 	static bool raw_init; /* raw decoders loaded yet? (defaults to false) */
502 
503 	if (!dev)
504 		return -EINVAL;
505 
506 	if (!raw_init) {
507 		request_module("ir-lirc-codec");
508 		raw_init = true;
509 	}
510 
511 	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
512 	if (!dev->raw)
513 		return -ENOMEM;
514 
515 	dev->raw->dev = dev;
516 	dev->change_protocol = change_protocol;
517 	timer_setup(&dev->raw->edge_handle, edge_handle, 0);
518 	INIT_KFIFO(dev->raw->kfifo);
519 
520 	return 0;
521 }
522 
523 int ir_raw_event_register(struct rc_dev *dev)
524 {
525 	struct ir_raw_handler *handler;
526 	struct task_struct *thread;
527 
528 	/*
529 	 * raw transmitters do not need any event registration
530 	 * because the event is coming from userspace
531 	 */
532 	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
533 		thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
534 				     dev->minor);
535 
536 		if (IS_ERR(thread))
537 			return PTR_ERR(thread);
538 
539 		dev->raw->thread = thread;
540 	}
541 
542 	mutex_lock(&ir_raw_handler_lock);
543 	list_add_tail(&dev->raw->list, &ir_raw_client_list);
544 	list_for_each_entry(handler, &ir_raw_handler_list, list)
545 		if (handler->raw_register)
546 			handler->raw_register(dev);
547 	mutex_unlock(&ir_raw_handler_lock);
548 
549 	return 0;
550 }
551 
552 void ir_raw_event_free(struct rc_dev *dev)
553 {
554 	if (!dev)
555 		return;
556 
557 	kfree(dev->raw);
558 	dev->raw = NULL;
559 }
560 
561 void ir_raw_event_unregister(struct rc_dev *dev)
562 {
563 	struct ir_raw_handler *handler;
564 
565 	if (!dev || !dev->raw)
566 		return;
567 
568 	kthread_stop(dev->raw->thread);
569 	del_timer_sync(&dev->raw->edge_handle);
570 
571 	mutex_lock(&ir_raw_handler_lock);
572 	list_del(&dev->raw->list);
573 	list_for_each_entry(handler, &ir_raw_handler_list, list)
574 		if (handler->raw_unregister)
575 			handler->raw_unregister(dev);
576 	mutex_unlock(&ir_raw_handler_lock);
577 
578 	ir_raw_event_free(dev);
579 }
580 
581 /*
582  * Extension interface - used to register the IR decoders
583  */
584 
585 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
586 {
587 	struct ir_raw_event_ctrl *raw;
588 
589 	mutex_lock(&ir_raw_handler_lock);
590 	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
591 	if (ir_raw_handler->raw_register)
592 		list_for_each_entry(raw, &ir_raw_client_list, list)
593 			ir_raw_handler->raw_register(raw->dev);
594 	atomic64_or(ir_raw_handler->protocols, &available_protocols);
595 	mutex_unlock(&ir_raw_handler_lock);
596 
597 	return 0;
598 }
599 EXPORT_SYMBOL(ir_raw_handler_register);
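/*
 * Illustrative sketch (not part of this file): the skeleton of a protocol
 * decoder module built on the extension interface. The protocol bit, the
 * decode callback and the example_* names are hypothetical (real decoders
 * live in ir-*-decoder.c and also include <linux/module.h>).
 */
static int example_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
	/* feed one pulse/space sample into a per-device state machine */
	return 0;
}

static struct ir_raw_handler example_handler = {
	.protocols	= RC_PROTO_BIT_OTHER,
	.decode		= example_decode,
};

static int __init example_decoder_init(void)
{
	return ir_raw_handler_register(&example_handler);
}

static void __exit example_decoder_exit(void)
{
	ir_raw_handler_unregister(&example_handler);
}

module_init(example_decoder_init);
module_exit(example_decoder_exit);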
600 
601 void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
602 {
603 	struct ir_raw_event_ctrl *raw;
604 	u64 protocols = ir_raw_handler->protocols;
605 
606 	mutex_lock(&ir_raw_handler_lock);
607 	list_del(&ir_raw_handler->list);
608 	list_for_each_entry(raw, &ir_raw_client_list, list) {
609 		ir_raw_disable_protocols(raw->dev, protocols);
610 		if (ir_raw_handler->raw_unregister)
611 			ir_raw_handler->raw_unregister(raw->dev);
612 	}
613 	atomic64_andnot(protocols, &available_protocols);
614 	mutex_unlock(&ir_raw_handler_lock);
615 }
616 EXPORT_SYMBOL(ir_raw_handler_unregister);
617