1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  ALSA sequencer Client Manager
4  *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
5  *                             Jaroslav Kysela <perex@perex.cz>
6  *                             Takashi Iwai <tiwai@suse.de>
7  */
8 
9 #include <linux/init.h>
10 #include <linux/export.h>
11 #include <linux/slab.h>
12 #include <sound/core.h>
13 #include <sound/minors.h>
14 #include <linux/kmod.h>
15 
16 #include <sound/seq_kernel.h>
17 #include "seq_clientmgr.h"
18 #include "seq_memory.h"
19 #include "seq_queue.h"
20 #include "seq_timer.h"
21 #include "seq_info.h"
22 #include "seq_system.h"
23 #include <sound/seq_device.h>
24 #ifdef CONFIG_COMPAT
25 #include <linux/compat.h>
26 #endif
27 
28 /* Client Manager
29  *
30  * this module handles the connections of userland and kernel clients
31  *
32  */
33 
34 /*
35  * There are four ranges of client numbers (last two shared):
36  * 0..15: global clients
37  * 16..127: statically allocated client numbers for cards 0..27
38  * 128..191: dynamically allocated client numbers for cards 28..31
39  * 128..191: dynamically allocated client numbers for applications
40  */
41 
42 /* number of kernel non-card clients */
43 #define SNDRV_SEQ_GLOBAL_CLIENTS	16
44 /* clients per card, for static clients */
45 #define SNDRV_SEQ_CLIENTS_PER_CARD	4
46 /* dynamically allocated client numbers (both kernel drivers and user space) */
47 #define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128
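/*
 * Illustrative note, derived from the defines above and the lookup code in
 * snd_seq_client_use_ptr() below: a statically numbered card N (N < 28)
 * owns the client range
 *
 *	SNDRV_SEQ_GLOBAL_CLIENTS + N * SNDRV_SEQ_CLIENTS_PER_CARD
 *	  .. SNDRV_SEQ_GLOBAL_CLIENTS + N * SNDRV_SEQ_CLIENTS_PER_CARD + 3
 *
 * e.g. card 0 -> clients 16..19, card 1 -> clients 20..23.  Everything from
 * SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN (128) upwards is assigned dynamically.
 */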
48 
49 #define SNDRV_SEQ_LFLG_INPUT	0x0001
50 #define SNDRV_SEQ_LFLG_OUTPUT	0x0002
51 #define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)
52 
53 static DEFINE_SPINLOCK(clients_lock);
54 static DEFINE_MUTEX(register_mutex);
55 
56 /*
57  * client table
58  */
59 static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
60 static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
61 static struct snd_seq_usage client_usage;
62 
63 /*
64  * prototypes
65  */
66 static int bounce_error_event(struct snd_seq_client *client,
67 			      struct snd_seq_event *event,
68 			      int err, int atomic, int hop);
69 static int snd_seq_deliver_single_event(struct snd_seq_client *client,
70 					struct snd_seq_event *event,
71 					int filter, int atomic, int hop);
72 
73 /*
74  */
75 static inline unsigned short snd_seq_file_flags(struct file *file)
76 {
77         switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
78         case FMODE_WRITE:
79                 return SNDRV_SEQ_LFLG_OUTPUT;
80         case FMODE_READ:
81                 return SNDRV_SEQ_LFLG_INPUT;
82         default:
83                 return SNDRV_SEQ_LFLG_OPEN;
84         }
85 }
86 
87 static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
88 {
89 	return snd_seq_total_cells(client->pool) > 0;
90 }
91 
92 /* return pointer to client structure for specified id */
93 static struct snd_seq_client *clientptr(int clientid)
94 {
95 	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
96 		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
97 			   clientid);
98 		return NULL;
99 	}
100 	return clienttab[clientid];
101 }
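/*
 * Note: clientptr() only indexes the table and takes no reference; the
 * callers in this file invoke it with clients_lock held.  Use
 * snd_seq_client_use_ptr() below when a refcounted pointer is needed.
 */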
102 
103 struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
104 {
105 	unsigned long flags;
106 	struct snd_seq_client *client;
107 
108 	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
109 		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
110 			   clientid);
111 		return NULL;
112 	}
113 	spin_lock_irqsave(&clients_lock, flags);
114 	client = clientptr(clientid);
115 	if (client)
116 		goto __lock;
117 	if (clienttablock[clientid]) {
118 		spin_unlock_irqrestore(&clients_lock, flags);
119 		return NULL;
120 	}
121 	spin_unlock_irqrestore(&clients_lock, flags);
122 #ifdef CONFIG_MODULES
123 	if (!in_interrupt()) {
124 		static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
125 		static char card_requested[SNDRV_CARDS];
126 		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
127 			int idx;
128 
129 			if (!client_requested[clientid]) {
130 				client_requested[clientid] = 1;
131 				for (idx = 0; idx < 15; idx++) {
132 					if (seq_client_load[idx] < 0)
133 						break;
134 					if (seq_client_load[idx] == clientid) {
135 						request_module("snd-seq-client-%i",
136 							       clientid);
137 						break;
138 					}
139 				}
140 			}
141 		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
142 			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
143 				SNDRV_SEQ_CLIENTS_PER_CARD;
144 			if (card < snd_ecards_limit) {
145 				if (! card_requested[card]) {
146 					card_requested[card] = 1;
147 					snd_request_card(card);
148 				}
149 				snd_seq_device_load_drivers();
150 			}
151 		}
152 		spin_lock_irqsave(&clients_lock, flags);
153 		client = clientptr(clientid);
154 		if (client)
155 			goto __lock;
156 		spin_unlock_irqrestore(&clients_lock, flags);
157 	}
158 #endif
159 	return NULL;
160 
161       __lock:
162 	snd_use_lock_use(&client->use_lock);
163 	spin_unlock_irqrestore(&clients_lock, flags);
164 	return client;
165 }
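/*
 * Minimal usage sketch for the refcounted lookup above (this mirrors the
 * pattern used by the ioctl handlers later in this file):
 *
 *	struct snd_seq_client *cptr = snd_seq_client_use_ptr(id);
 *	if (!cptr)
 *		return -ENOENT;
 *	... access cptr ...
 *	snd_seq_client_unlock(cptr);
 */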
166 
167 /* Take a refcount and lock ioctl_mutex on the given client;
168  * used only by the OSS sequencer.
169  * Unlock via snd_seq_client_ioctl_unlock() below
170  */
171 bool snd_seq_client_ioctl_lock(int clientid)
172 {
173 	struct snd_seq_client *client;
174 
175 	client = snd_seq_client_use_ptr(clientid);
176 	if (!client)
177 		return false;
178 	mutex_lock(&client->ioctl_mutex);
179 	/* The client isn't unrefed here; see snd_seq_client_ioctl_unlock() */
180 	return true;
181 }
182 EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_lock);
183 
184 /* Unlock and unref the given client; for OSS sequencer use only */
185 void snd_seq_client_ioctl_unlock(int clientid)
186 {
187 	struct snd_seq_client *client;
188 
189 	client = snd_seq_client_use_ptr(clientid);
190 	if (WARN_ON(!client))
191 		return;
192 	mutex_unlock(&client->ioctl_mutex);
193 	/* The two unrefs below are intentional; the first releases the leftover
194 	 * reference from snd_seq_client_ioctl_lock() above, and the second
195 	 * releases the reference taken by snd_seq_client_use_ptr() in this function.
196 	 */
197 	snd_seq_client_unlock(client);
198 	snd_seq_client_unlock(client);
199 }
200 EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_unlock);
201 
202 static void usage_alloc(struct snd_seq_usage *res, int num)
203 {
204 	res->cur += num;
205 	if (res->cur > res->peak)
206 		res->peak = res->cur;
207 }
208 
209 static void usage_free(struct snd_seq_usage *res, int num)
210 {
211 	res->cur -= num;
212 }
213 
214 /* initialise data structures */
215 int __init client_init_data(void)
216 {
217 	/* zap out the client table */
218 	memset(&clienttablock, 0, sizeof(clienttablock));
219 	memset(&clienttab, 0, sizeof(clienttab));
220 	return 0;
221 }
222 
223 
224 static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
225 {
226 	int c;
227 	struct snd_seq_client *client;
228 
229 	/* init client data */
230 	client = kzalloc(sizeof(*client), GFP_KERNEL);
231 	if (client == NULL)
232 		return NULL;
233 	client->pool = snd_seq_pool_new(poolsize);
234 	if (client->pool == NULL) {
235 		kfree(client);
236 		return NULL;
237 	}
238 	client->type = NO_CLIENT;
239 	snd_use_lock_init(&client->use_lock);
240 	rwlock_init(&client->ports_lock);
241 	mutex_init(&client->ports_mutex);
242 	INIT_LIST_HEAD(&client->ports_list_head);
243 	mutex_init(&client->ioctl_mutex);
244 
245 	/* find free slot in the client table */
246 	spin_lock_irq(&clients_lock);
247 	if (client_index < 0) {
248 		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
249 		     c < SNDRV_SEQ_MAX_CLIENTS;
250 		     c++) {
251 			if (clienttab[c] || clienttablock[c])
252 				continue;
253 			clienttab[client->number = c] = client;
254 			spin_unlock_irq(&clients_lock);
255 			return client;
256 		}
257 	} else {
258 		if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
259 			clienttab[client->number = client_index] = client;
260 			spin_unlock_irq(&clients_lock);
261 			return client;
262 		}
263 	}
264 	spin_unlock_irq(&clients_lock);
265 	snd_seq_pool_delete(&client->pool);
266 	kfree(client);
267 	return NULL;	/* no free slot found or busy, return failure code */
268 }
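/*
 * Slot selection above: a negative client_index means "pick any free slot",
 * and the search starts at SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN (user clients),
 * while a non-negative index requests that exact slot (kernel clients with
 * fixed numbers).  NULL is returned when the table is full or the slot is
 * currently being torn down (clienttablock set).
 */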
269 
270 
271 static int seq_free_client1(struct snd_seq_client *client)
272 {
273 	if (!client)
274 		return 0;
275 	spin_lock_irq(&clients_lock);
276 	clienttablock[client->number] = 1;
277 	clienttab[client->number] = NULL;
278 	spin_unlock_irq(&clients_lock);
279 	snd_seq_delete_all_ports(client);
280 	snd_seq_queue_client_leave(client->number);
281 	snd_use_lock_sync(&client->use_lock);
282 	if (client->pool)
283 		snd_seq_pool_delete(&client->pool);
284 	spin_lock_irq(&clients_lock);
285 	clienttablock[client->number] = 0;
286 	spin_unlock_irq(&clients_lock);
287 	return 0;
288 }
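/*
 * clienttablock[] acts as a transient "busy" marker: it is set before the
 * client's ports and queue usage are torn down so that concurrent
 * snd_seq_client_use_ptr() calls return NULL instead of reviving the entry,
 * and it is cleared again once the teardown has completed.
 */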
289 
290 
291 static void seq_free_client(struct snd_seq_client * client)
292 {
293 	mutex_lock(&register_mutex);
294 	switch (client->type) {
295 	case NO_CLIENT:
296 		pr_warn("ALSA: seq: Trying to free unused client %d\n",
297 			client->number);
298 		break;
299 	case USER_CLIENT:
300 	case KERNEL_CLIENT:
301 		seq_free_client1(client);
302 		usage_free(&client_usage, 1);
303 		break;
304 
305 	default:
306 		pr_err("ALSA: seq: Trying to free client %d with undefined type = %d\n",
307 			   client->number, client->type);
308 	}
309 	mutex_unlock(&register_mutex);
310 
311 	snd_seq_system_client_ev_client_exit(client->number);
312 }
313 
314 
315 
316 /* -------------------------------------------------------- */
317 
318 /* create a user client */
319 static int snd_seq_open(struct inode *inode, struct file *file)
320 {
321 	int c, mode;			/* client id */
322 	struct snd_seq_client *client;
323 	struct snd_seq_user_client *user;
324 	int err;
325 
326 	err = stream_open(inode, file);
327 	if (err < 0)
328 		return err;
329 
330 	mutex_lock(&register_mutex);
331 	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
332 	if (!client) {
333 		mutex_unlock(&register_mutex);
334 		return -ENOMEM;	/* failure code */
335 	}
336 
337 	mode = snd_seq_file_flags(file);
338 	if (mode & SNDRV_SEQ_LFLG_INPUT)
339 		client->accept_input = 1;
340 	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
341 		client->accept_output = 1;
342 
343 	user = &client->data.user;
344 	user->fifo = NULL;
345 	user->fifo_pool_size = 0;
346 
347 	if (mode & SNDRV_SEQ_LFLG_INPUT) {
348 		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
349 		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
350 		if (user->fifo == NULL) {
351 			seq_free_client1(client);
352 			kfree(client);
353 			mutex_unlock(&register_mutex);
354 			return -ENOMEM;
355 		}
356 	}
357 
358 	usage_alloc(&client_usage, 1);
359 	client->type = USER_CLIENT;
360 	mutex_unlock(&register_mutex);
361 
362 	c = client->number;
363 	file->private_data = client;
364 
365 	/* fill client data */
366 	user->file = file;
367 	sprintf(client->name, "Client-%d", c);
368 	client->data.user.owner = get_pid(task_pid(current));
369 
370 	/* make others aware of this new client */
371 	snd_seq_system_client_ev_client_start(c);
372 
373 	return 0;
374 }
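/*
 * Note on pool allocation: the read FIFO is created right here when the
 * device is opened for reading, whereas the output pool object created by
 * seq_create_client1() gets its cells only when the first write() triggers
 * snd_seq_pool_init() (see snd_seq_write() below).
 */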
375 
376 /* delete a user client */
377 static int snd_seq_release(struct inode *inode, struct file *file)
378 {
379 	struct snd_seq_client *client = file->private_data;
380 
381 	if (client) {
382 		seq_free_client(client);
383 		if (client->data.user.fifo)
384 			snd_seq_fifo_delete(&client->data.user.fifo);
385 		put_pid(client->data.user.owner);
386 		kfree(client);
387 	}
388 
389 	return 0;
390 }
391 
392 
393 /* handle client read() */
394 /* possible error values:
395  *	-ENXIO	invalid client or file open mode
396  *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
397  *	-EINVAL	not enough user-space buffer to write the whole event
398  *	-EFAULT	seg. fault during copy to user space
399  */
400 static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
401 			    loff_t *offset)
402 {
403 	struct snd_seq_client *client = file->private_data;
404 	struct snd_seq_fifo *fifo;
405 	int err;
406 	long result = 0;
407 	struct snd_seq_event_cell *cell;
408 
409 	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
410 		return -ENXIO;
411 
412 	if (!access_ok(buf, count))
413 		return -EFAULT;
414 
415 	/* check client structures are in place */
416 	if (snd_BUG_ON(!client))
417 		return -ENXIO;
418 
419 	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
420 		return -ENXIO;
421 
422 	if (atomic_read(&fifo->overflow) > 0) {
423 		/* buffer overflow is detected */
424 		snd_seq_fifo_clear(fifo);
425 		/* return error code */
426 		return -ENOSPC;
427 	}
428 
429 	cell = NULL;
430 	err = 0;
431 	snd_seq_fifo_lock(fifo);
432 
433 	/* while data available in queue */
434 	while (count >= sizeof(struct snd_seq_event)) {
435 		int nonblock;
436 
437 		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
438 		if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
439 			break;
440 		}
441 		if (snd_seq_ev_is_variable(&cell->event)) {
442 			struct snd_seq_event tmpev;
443 			tmpev = cell->event;
444 			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
445 			if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
446 				err = -EFAULT;
447 				break;
448 			}
449 			count -= sizeof(struct snd_seq_event);
450 			buf += sizeof(struct snd_seq_event);
451 			err = snd_seq_expand_var_event(&cell->event, count,
452 						       (char __force *)buf, 0,
453 						       sizeof(struct snd_seq_event));
454 			if (err < 0)
455 				break;
456 			result += err;
457 			count -= err;
458 			buf += err;
459 		} else {
460 			if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
461 				err = -EFAULT;
462 				break;
463 			}
464 			count -= sizeof(struct snd_seq_event);
465 			buf += sizeof(struct snd_seq_event);
466 		}
467 		snd_seq_cell_free(cell);
468 		cell = NULL; /* to be sure */
469 		result += sizeof(struct snd_seq_event);
470 	}
471 
472 	if (err < 0) {
473 		if (cell)
474 			snd_seq_fifo_cell_putback(fifo, cell);
475 		if (err == -EAGAIN && result > 0)
476 			err = 0;
477 	}
478 	snd_seq_fifo_unlock(fifo);
479 
480 	return (err < 0) ? err : result;
481 }
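/*
 * Wire format produced by the loop above: every event starts with a fixed
 * struct snd_seq_event header; variable-length events are followed
 * immediately by their expanded external data.  A single read() may thus
 * return several back-to-back events, as long as the buffer has room for
 * at least one full header.
 */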
482 
483 
484 /*
485  * check access permission to the port
486  */
487 static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
488 {
489 	if ((port->capability & flags) != flags)
490 		return 0;
491 	return flags;
492 }
493 
494 /*
495  * check if the destination client is available, and return the pointer
496  * if filter is non-zero, client filter bitmap is tested.
497  */
498 static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
499 						    int filter)
500 {
501 	struct snd_seq_client *dest;
502 
503 	dest = snd_seq_client_use_ptr(event->dest.client);
504 	if (dest == NULL)
505 		return NULL;
506 	if (! dest->accept_input)
507 		goto __not_avail;
508 	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
509 	    ! test_bit(event->type, dest->event_filter))
510 		goto __not_avail;
511 	if (filter && !(dest->filter & filter))
512 		goto __not_avail;
513 
514 	return dest; /* ok - accessible */
515 __not_avail:
516 	snd_seq_client_unlock(dest);
517 	return NULL;
518 }
519 
520 
521 /*
522  * Return the error event.
523  *
524  * If the receiver client is a user client, the original event is
525  * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
526  * the original event is also variable length, the external data is
527  * copied after the event record.
528  * If the receiver client is a kernel client, the original event is
529  * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
530  * kmalloc.
531  */
532 static int bounce_error_event(struct snd_seq_client *client,
533 			      struct snd_seq_event *event,
534 			      int err, int atomic, int hop)
535 {
536 	struct snd_seq_event bounce_ev;
537 	int result;
538 
539 	if (client == NULL ||
540 	    ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
541 	    ! client->accept_input)
542 		return 0; /* ignored */
543 
544 	/* set up quoted error */
545 	memset(&bounce_ev, 0, sizeof(bounce_ev));
546 	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
547 	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
548 	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
549 	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
550 	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
551 	bounce_ev.dest.client = client->number;
552 	bounce_ev.dest.port = event->source.port;
553 	bounce_ev.data.quote.origin = event->dest;
554 	bounce_ev.data.quote.event = event;
555 	bounce_ev.data.quote.value = -err; /* use positive value */
556 	result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
557 	if (result < 0) {
558 		client->event_lost++;
559 		return result;
560 	}
561 
562 	return result;
563 }
564 
565 
566 /*
567  * rewrite the time-stamp of the event record with the current time
568  * of the given queue.
569  * return non-zero if updated.
570  */
571 static int update_timestamp_of_queue(struct snd_seq_event *event,
572 				     int queue, int real_time)
573 {
574 	struct snd_seq_queue *q;
575 
576 	q = queueptr(queue);
577 	if (! q)
578 		return 0;
579 	event->queue = queue;
580 	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
581 	if (real_time) {
582 		event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
583 		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
584 	} else {
585 		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
586 		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
587 	}
588 	queuefree(q);
589 	return 1;
590 }
591 
592 
593 /*
594  * deliver an event to the specified destination.
595  * if filter is non-zero, client filter bitmap is tested.
596  *
597  *  RETURN VALUE: 0 : if succeeded
598  *		 <0 : error
599  */
600 static int snd_seq_deliver_single_event(struct snd_seq_client *client,
601 					struct snd_seq_event *event,
602 					int filter, int atomic, int hop)
603 {
604 	struct snd_seq_client *dest = NULL;
605 	struct snd_seq_client_port *dest_port = NULL;
606 	int result = -ENOENT;
607 	int direct;
608 
609 	direct = snd_seq_ev_is_direct(event);
610 
611 	dest = get_event_dest_client(event, filter);
612 	if (dest == NULL)
613 		goto __skip;
614 	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
615 	if (dest_port == NULL)
616 		goto __skip;
617 
618 	/* check permission */
619 	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
620 		result = -EPERM;
621 		goto __skip;
622 	}
623 
624 	if (dest_port->timestamping)
625 		update_timestamp_of_queue(event, dest_port->time_queue,
626 					  dest_port->time_real);
627 
628 	switch (dest->type) {
629 	case USER_CLIENT:
630 		if (dest->data.user.fifo)
631 			result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
632 		break;
633 
634 	case KERNEL_CLIENT:
635 		if (dest_port->event_input == NULL)
636 			break;
637 		result = dest_port->event_input(event, direct,
638 						dest_port->private_data,
639 						atomic, hop);
640 		break;
641 	default:
642 		break;
643 	}
644 
645   __skip:
646 	if (dest_port)
647 		snd_seq_port_unlock(dest_port);
648 	if (dest)
649 		snd_seq_client_unlock(dest);
650 
651 	if (result < 0 && !direct) {
652 		result = bounce_error_event(client, event, result, atomic, hop);
653 	}
654 	return result;
655 }
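/*
 * Delivery above is type dependent: for USER_CLIENT destinations the event
 * is pushed into the client's read FIFO (picked up later by snd_seq_read()),
 * while KERNEL_CLIENT destinations get their port's event_input() callback
 * invoked directly.  Failed non-direct deliveries are reported back to the
 * sending client via bounce_error_event().
 */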
656 
657 
658 /*
659  * send the event to all subscribers:
660  */
661 static int deliver_to_subscribers(struct snd_seq_client *client,
662 				  struct snd_seq_event *event,
663 				  int atomic, int hop)
664 {
665 	struct snd_seq_subscribers *subs;
666 	int err, result = 0, num_ev = 0;
667 	struct snd_seq_event event_saved;
668 	struct snd_seq_client_port *src_port;
669 	struct snd_seq_port_subs_info *grp;
670 
671 	src_port = snd_seq_port_use_ptr(client, event->source.port);
672 	if (src_port == NULL)
673 		return -EINVAL; /* invalid source port */
674 	/* save original event record */
675 	event_saved = *event;
676 	grp = &src_port->c_src;
677 
678 	/* lock list */
679 	if (atomic)
680 		read_lock(&grp->list_lock);
681 	else
682 		down_read_nested(&grp->list_mutex, hop);
683 	list_for_each_entry(subs, &grp->list_head, src_list) {
684 		/* both ports ready? */
685 		if (atomic_read(&subs->ref_count) != 2)
686 			continue;
687 		event->dest = subs->info.dest;
688 		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
689 			/* convert the timestamp according to the subscription's flags */
690 			update_timestamp_of_queue(event, subs->info.queue,
691 						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
692 		err = snd_seq_deliver_single_event(client, event,
693 						   0, atomic, hop);
694 		if (err < 0) {
695 			/* save first error that occurs and continue */
696 			if (!result)
697 				result = err;
698 			continue;
699 		}
700 		num_ev++;
701 		/* restore original event record */
702 		*event = event_saved;
703 	}
704 	if (atomic)
705 		read_unlock(&grp->list_lock);
706 	else
707 		up_read(&grp->list_mutex);
708 	*event = event_saved; /* restore */
709 	snd_seq_port_unlock(src_port);
710 	return (result < 0) ? result : num_ev;
711 }
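/*
 * The ref_count == 2 test above skips half-established connections: an
 * event is delivered over a subscription only after both the sender and
 * the receiver side of it have been set up.
 */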
712 
713 
714 #ifdef SUPPORT_BROADCAST
715 /*
716  * broadcast to all ports:
717  */
718 static int port_broadcast_event(struct snd_seq_client *client,
719 				struct snd_seq_event *event,
720 				int atomic, int hop)
721 {
722 	int num_ev = 0, err, result = 0;
723 	struct snd_seq_client *dest_client;
724 	struct snd_seq_client_port *port;
725 
726 	dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
727 	if (dest_client == NULL)
728 		return 0; /* no matching destination */
729 
730 	read_lock(&dest_client->ports_lock);
731 	list_for_each_entry(port, &dest_client->ports_list_head, list) {
732 		event->dest.port = port->addr.port;
733 		/* pass NULL as source client to avoid error bounce */
734 		err = snd_seq_deliver_single_event(NULL, event,
735 						   SNDRV_SEQ_FILTER_BROADCAST,
736 						   atomic, hop);
737 		if (err < 0) {
738 			/* save first error that occurs and continue */
739 			if (!result)
740 				result = err;
741 			continue;
742 		}
743 		num_ev++;
744 	}
745 	read_unlock(&dest_client->ports_lock);
746 	snd_seq_client_unlock(dest_client);
747 	event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
748 	return (result < 0) ? result : num_ev;
749 }
750 
751 /*
752  * send the event to all clients:
753  * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
754  */
755 static int broadcast_event(struct snd_seq_client *client,
756 			   struct snd_seq_event *event, int atomic, int hop)
757 {
758 	int err, result = 0, num_ev = 0;
759 	int dest;
760 	struct snd_seq_addr addr;
761 
762 	addr = event->dest; /* save */
763 
764 	for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
765 		/* don't send to itself */
766 		if (dest == client->number)
767 			continue;
768 		event->dest.client = dest;
769 		event->dest.port = addr.port;
770 		if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
771 			err = port_broadcast_event(client, event, atomic, hop);
772 		else
773 			/* pass NULL as source client to avoid error bounce */
774 			err = snd_seq_deliver_single_event(NULL, event,
775 							   SNDRV_SEQ_FILTER_BROADCAST,
776 							   atomic, hop);
777 		if (err < 0) {
778 			/* save first error that occurs and continue */
779 			if (!result)
780 				result = err;
781 			continue;
782 		}
783 		num_ev += err;
784 	}
785 	event->dest = addr; /* restore */
786 	return (result < 0) ? result : num_ev;
787 }
788 
789 
790 /* multicast - not supported yet */
791 static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
792 			   int atomic, int hop)
793 {
794 	pr_debug("ALSA: seq: multicast not supported yet.\n");
795 	return 0; /* ignored */
796 }
797 #endif /* SUPPORT_BROADCAST */
798 
799 
800 /* deliver an event to the destination port(s).
801  * if the event is to subscribers or broadcast, the event is dispatched
802  * to multiple targets.
803  *
804  * RETURN VALUE: n > 0  : the number of delivered events.
805  *               n == 0 : the event was not passed to any client.
806  *               n < 0  : error - event was not processed.
807  */
808 static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
809 				 int atomic, int hop)
810 {
811 	int result;
812 
813 	hop++;
814 	if (hop >= SNDRV_SEQ_MAX_HOPS) {
815 		pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n",
816 			   event->source.client, event->source.port,
817 			   event->dest.client, event->dest.port);
818 		return -EMLINK;
819 	}
820 
821 	if (snd_seq_ev_is_variable(event) &&
822 	    snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR)))
823 		return -EINVAL;
824 
825 	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
826 	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
827 		result = deliver_to_subscribers(client, event, atomic, hop);
828 #ifdef SUPPORT_BROADCAST
829 	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
830 		 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
831 		result = broadcast_event(client, event, atomic, hop);
832 	else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
833 		result = multicast_event(client, event, atomic, hop);
834 	else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
835 		result = port_broadcast_event(client, event, atomic, hop);
836 #endif
837 	else
838 		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);
839 
840 	return result;
841 }
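/*
 * The hop counter incremented above guards against delivery loops: a kernel
 * client that re-sends an event from its event_input() callback passes the
 * incremented hop value back in, and the chain is cut off with -EMLINK once
 * SNDRV_SEQ_MAX_HOPS is reached.
 */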
842 
843 /*
844  * dispatch an event cell:
845  * This function is called only from queue check routines in timer
846  * interrupts or right after an event has been enqueued.
847  * The event cell shall be released or re-queued in this function.
848  *
849  * RETURN VALUE: n > 0  : the number of delivered events.
850  *		 n == 0 : the event was not passed to any client.
851  *		 n < 0  : error - event was not processed.
852  */
853 int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
854 {
855 	struct snd_seq_client *client;
856 	int result;
857 
858 	if (snd_BUG_ON(!cell))
859 		return -EINVAL;
860 
861 	client = snd_seq_client_use_ptr(cell->event.source.client);
862 	if (client == NULL) {
863 		snd_seq_cell_free(cell); /* release this cell */
864 		return -EINVAL;
865 	}
866 
867 	if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
868 		/* NOTE event:
869 		 * the event cell is re-used as a NOTE-OFF event and
870 		 * enqueued again.
871 		 */
872 		struct snd_seq_event tmpev, *ev;
873 
874 		/* reserve this event to enqueue note-off later */
875 		tmpev = cell->event;
876 		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
877 		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);
878 
879 		/*
880 		 * This was originally a note event.  We now re-use the
881 		 * cell for the note-off event.
882 		 */
883 
884 		ev = &cell->event;
885 		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
886 		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;
887 
888 		/* add the duration time */
889 		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
890 		case SNDRV_SEQ_TIME_STAMP_TICK:
891 			ev->time.tick += ev->data.note.duration;
892 			break;
893 		case SNDRV_SEQ_TIME_STAMP_REAL:
894 			/* unit for duration is ms */
895 			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
896 			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
897 						ev->time.time.tv_nsec / 1000000000;
898 			ev->time.time.tv_nsec %= 1000000000;
899 			break;
900 		}
901 		ev->data.note.velocity = ev->data.note.off_velocity;
902 
903 		/* Now queue this cell as the note off event */
904 		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
905 			snd_seq_cell_free(cell); /* release this cell */
906 
907 	} else {
908 		/* Normal events:
909 		 * event cell is freed after processing the event
910 		 */
911 
912 		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
913 		snd_seq_cell_free(cell);
914 	}
915 
916 	snd_seq_client_unlock(client);
917 	return result;
918 }
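/*
 * Worked example for the duration handling above (real-time stamps): a
 * note with duration 1500 ms adds 1500 % 1000 = 500 ms (500000000 ns) to
 * tv_nsec and 1500 / 1000 = 1 s plus any nanosecond carry to tv_sec, so
 * the re-queued NOTEOFF fires 1.5 s after the original timestamp.
 */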
919 
920 
921 /* Allocate a cell from client pool and enqueue it to queue:
922  * if pool is empty and blocking is TRUE, sleep until a new cell is
923  * available.
924  */
925 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
926 					struct snd_seq_event *event,
927 					struct file *file, int blocking,
928 					int atomic, int hop,
929 					struct mutex *mutexp)
930 {
931 	struct snd_seq_event_cell *cell;
932 	int err;
933 
934 	/* special queue values - force direct passing */
935 	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
936 		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
937 		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
938 	} else
939 #ifdef SUPPORT_BROADCAST
940 		if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
941 			event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
942 			event->queue = SNDRV_SEQ_QUEUE_DIRECT;
943 		}
944 #endif
945 	if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
946 		/* check presence of source port */
947 		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
948 		if (src_port == NULL)
949 			return -EINVAL;
950 		snd_seq_port_unlock(src_port);
951 	}
952 
953 	/* direct event processing without enqueueing */
954 	if (snd_seq_ev_is_direct(event)) {
955 		if (event->type == SNDRV_SEQ_EVENT_NOTE)
956 			return -EINVAL; /* this event must be enqueued! */
957 		return snd_seq_deliver_event(client, event, atomic, hop);
958 	}
959 
960 	/* Not direct, normal queuing */
961 	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
962 		return -EINVAL;  /* invalid queue */
963 	if (! snd_seq_write_pool_allocated(client))
964 		return -ENXIO; /* queue is not allocated */
965 
966 	/* allocate an event cell */
967 	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
968 				file, mutexp);
969 	if (err < 0)
970 		return err;
971 
972 	/* we got a cell. enqueue it. */
973 	if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
974 		snd_seq_cell_free(cell);
975 		return err;
976 	}
977 
978 	return 0;
979 }
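/*
 * Summary of the two paths above: direct events (and the subscribers /
 * broadcast pseudo-addresses) are delivered immediately via
 * snd_seq_deliver_event(), while all other events must reference a queue
 * the client uses, are duplicated into a cell from the client's output
 * pool by snd_seq_event_dup() (blocking there when allowed and the pool is
 * exhausted), and are then enqueued.  NOTE events may not take the direct
 * path because the queue is needed to schedule their note-off.
 */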
980 
981 
982 /*
983  * check validity of event type and data length.
984  * return non-zero if invalid.
985  */
986 static int check_event_type_and_length(struct snd_seq_event *ev)
987 {
988 	switch (snd_seq_ev_length_type(ev)) {
989 	case SNDRV_SEQ_EVENT_LENGTH_FIXED:
990 		if (snd_seq_ev_is_variable_type(ev))
991 			return -EINVAL;
992 		break;
993 	case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
994 		if (! snd_seq_ev_is_variable_type(ev) ||
995 		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
996 			return -EINVAL;
997 		break;
998 	case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
999 		if (! snd_seq_ev_is_direct(ev))
1000 			return -EINVAL;
1001 		break;
1002 	}
1003 	return 0;
1004 }
1005 
1006 
1007 /* handle write() */
1008 /* possible error values:
1009  *	-ENXIO	invalid client or file open mode
1010  *	-ENOMEM	malloc failed
1011  *	-EFAULT	seg. fault during copy from user space
1012  *	-EINVAL	invalid event
1013  *	-EAGAIN	no space in output pool
1014  *	-EINTR	interrupted while sleeping
1015  *	-EMLINK	too many hops
1016  *	others	depends on return value from driver callback
1017  */
1018 static ssize_t snd_seq_write(struct file *file, const char __user *buf,
1019 			     size_t count, loff_t *offset)
1020 {
1021 	struct snd_seq_client *client = file->private_data;
1022 	int written = 0, len;
1023 	int err, handled;
1024 	struct snd_seq_event event;
1025 
1026 	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
1027 		return -ENXIO;
1028 
1029 	/* check client structures are in place */
1030 	if (snd_BUG_ON(!client))
1031 		return -ENXIO;
1032 
1033 	if (!client->accept_output || client->pool == NULL)
1034 		return -ENXIO;
1035 
1036  repeat:
1037 	handled = 0;
1038 	/* allocate the pool now if the pool is not allocated yet */
1039 	mutex_lock(&client->ioctl_mutex);
1040 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
1041 		err = snd_seq_pool_init(client->pool);
1042 		if (err < 0)
1043 			goto out;
1044 	}
1045 
1046 	/* only process whole events */
1047 	err = -EINVAL;
1048 	while (count >= sizeof(struct snd_seq_event)) {
1049 		/* Read in the event header from the user */
1050 		len = sizeof(event);
1051 		if (copy_from_user(&event, buf, len)) {
1052 			err = -EFAULT;
1053 			break;
1054 		}
1055 		event.source.client = client->number;	/* fill in client number */
1056 		/* Check for extension data length */
1057 		if (check_event_type_and_length(&event)) {
1058 			err = -EINVAL;
1059 			break;
1060 		}
1061 
1062 		/* check for special events */
1063 		if (event.type == SNDRV_SEQ_EVENT_NONE)
1064 			goto __skip_event;
1065 		else if (snd_seq_ev_is_reserved(&event)) {
1066 			err = -EINVAL;
1067 			break;
1068 		}
1069 
1070 		if (snd_seq_ev_is_variable(&event)) {
1071 			int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
1072 			if ((size_t)(extlen + len) > count) {
1073 				/* back out, will get an error this time or next */
1074 				err = -EINVAL;
1075 				break;
1076 			}
1077 			/* set user space pointer */
1078 			event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
1079 			event.data.ext.ptr = (char __force *)buf
1080 						+ sizeof(struct snd_seq_event);
1081 			len += extlen; /* increment data length */
1082 		} else {
1083 #ifdef CONFIG_COMPAT
1084 			if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
1085 				void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
1086 				event.data.ext.ptr = ptr;
1087 			}
1088 #endif
1089 		}
1090 
1091 		/* ok, enqueue it */
1092 		err = snd_seq_client_enqueue_event(client, &event, file,
1093 						   !(file->f_flags & O_NONBLOCK),
1094 						   0, 0, &client->ioctl_mutex);
1095 		if (err < 0)
1096 			break;
1097 		handled++;
1098 
1099 	__skip_event:
1100 		/* Update pointers and counts */
1101 		count -= len;
1102 		buf += len;
1103 		written += len;
1104 
1105 		/* let's have a coffee break if too many events are queued */
1106 		if (++handled >= 200) {
1107 			mutex_unlock(&client->ioctl_mutex);
1108 			goto repeat;
1109 		}
1110 	}
1111 
1112  out:
1113 	mutex_unlock(&client->ioctl_mutex);
1114 	return written ? written : err;
1115 }
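/*
 * Input format expected by the loop above: a stream of fixed-size
 * struct snd_seq_event records, each optionally followed inline by
 * event.data.ext.len bytes of variable data, e.g.
 *
 *	[snd_seq_event][ext data][snd_seq_event][snd_seq_event][ext data]...
 *
 * The ioctl_mutex is dropped and re-acquired once the handled counter
 * reaches 200 so that one large write() does not hold it for too long.
 */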
1116 
1117 
1118 /*
1119  * handle polling
1120  */
1121 static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
1122 {
1123 	struct snd_seq_client *client = file->private_data;
1124 	__poll_t mask = 0;
1125 
1126 	/* check client structures are in place */
1127 	if (snd_BUG_ON(!client))
1128 		return EPOLLERR;
1129 
1130 	if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
1131 	    client->data.user.fifo) {
1132 
1133 		/* check if data is available in the outqueue */
1134 		if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
1135 			mask |= EPOLLIN | EPOLLRDNORM;
1136 	}
1137 
1138 	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
1139 
1140 		/* check if data is available in the pool */
1141 		if (!snd_seq_write_pool_allocated(client) ||
1142 		    snd_seq_pool_poll_wait(client->pool, file, wait))
1143 			mask |= EPOLLOUT | EPOLLWRNORM;
1144 	}
1145 
1146 	return mask;
1147 }
1148 
1149 
1150 /*-----------------------------------------------------*/
1151 
1152 static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg)
1153 {
1154 	int *pversion = arg;
1155 
1156 	*pversion = SNDRV_SEQ_VERSION;
1157 	return 0;
1158 }
1159 
1160 static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg)
1161 {
1162 	int *client_id = arg;
1163 
1164 	*client_id = client->number;
1165 	return 0;
1166 }
1167 
1168 /* SYSTEM_INFO ioctl() */
1169 static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg)
1170 {
1171 	struct snd_seq_system_info *info = arg;
1172 
1173 	memset(info, 0, sizeof(*info));
1174 	/* fill the info fields */
1175 	info->queues = SNDRV_SEQ_MAX_QUEUES;
1176 	info->clients = SNDRV_SEQ_MAX_CLIENTS;
1177 	info->ports = SNDRV_SEQ_MAX_PORTS;
1178 	info->channels = 256;	/* fixed limit */
1179 	info->cur_clients = client_usage.cur;
1180 	info->cur_queues = snd_seq_queue_get_cur_queues();
1181 
1182 	return 0;
1183 }
1184 
1185 
1186 /* RUNNING_MODE ioctl() */
1187 static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void  *arg)
1188 {
1189 	struct snd_seq_running_info *info = arg;
1190 	struct snd_seq_client *cptr;
1191 	int err = 0;
1192 
1193 	/* requested client number */
1194 	cptr = snd_seq_client_use_ptr(info->client);
1195 	if (cptr == NULL)
1196 		return -ENOENT;		/* don't change !!! */
1197 
1198 #ifdef SNDRV_BIG_ENDIAN
1199 	if (!info->big_endian) {
1200 		err = -EINVAL;
1201 		goto __err;
1202 	}
1203 #else
1204 	if (info->big_endian) {
1205 		err = -EINVAL;
1206 		goto __err;
1207 	}
1208 
1209 #endif
1210 	if (info->cpu_mode > sizeof(long)) {
1211 		err = -EINVAL;
1212 		goto __err;
1213 	}
1214 	cptr->convert32 = (info->cpu_mode < sizeof(long));
1215  __err:
1216 	snd_seq_client_unlock(cptr);
1217 	return err;
1218 }
1219 
1220 /* CLIENT_INFO ioctl() */
1221 static void get_client_info(struct snd_seq_client *cptr,
1222 			    struct snd_seq_client_info *info)
1223 {
1224 	info->client = cptr->number;
1225 
1226 	/* fill the info fields */
1227 	info->type = cptr->type;
1228 	strcpy(info->name, cptr->name);
1229 	info->filter = cptr->filter;
1230 	info->event_lost = cptr->event_lost;
1231 	memcpy(info->event_filter, cptr->event_filter, 32);
1232 	info->num_ports = cptr->num_ports;
1233 
1234 	if (cptr->type == USER_CLIENT)
1235 		info->pid = pid_vnr(cptr->data.user.owner);
1236 	else
1237 		info->pid = -1;
1238 
1239 	if (cptr->type == KERNEL_CLIENT)
1240 		info->card = cptr->data.kernel.card ? cptr->data.kernel.card->number : -1;
1241 	else
1242 		info->card = -1;
1243 
1244 	memset(info->reserved, 0, sizeof(info->reserved));
1245 }
1246 
1247 static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
1248 					 void *arg)
1249 {
1250 	struct snd_seq_client_info *client_info = arg;
1251 	struct snd_seq_client *cptr;
1252 
1253 	/* requested client number */
1254 	cptr = snd_seq_client_use_ptr(client_info->client);
1255 	if (cptr == NULL)
1256 		return -ENOENT;		/* don't change !!! */
1257 
1258 	get_client_info(cptr, client_info);
1259 	snd_seq_client_unlock(cptr);
1260 
1261 	return 0;
1262 }
1263 
1264 
1265 /* CLIENT_INFO ioctl() */
1266 static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
1267 					 void *arg)
1268 {
1269 	struct snd_seq_client_info *client_info = arg;
1270 
1271 	/* it is not allowed to set the info fields for another client */
1272 	if (client->number != client_info->client)
1273 		return -EPERM;
1274 	/* also client type must be set now */
1275 	if (client->type != client_info->type)
1276 		return -EINVAL;
1277 
1278 	/* fill the info fields */
1279 	if (client_info->name[0])
1280 		strscpy(client->name, client_info->name, sizeof(client->name));
1281 
1282 	client->filter = client_info->filter;
1283 	client->event_lost = client_info->event_lost;
1284 	memcpy(client->event_filter, client_info->event_filter, 32);
1285 
1286 	return 0;
1287 }
1288 
1289 
1290 /*
1291  * CREATE PORT ioctl()
1292  */
1293 static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1294 {
1295 	struct snd_seq_port_info *info = arg;
1296 	struct snd_seq_client_port *port;
1297 	struct snd_seq_port_callback *callback;
1298 	int port_idx;
1299 
1300 	/* it is not allowed to create a port for another client */
1301 	if (info->addr.client != client->number)
1302 		return -EPERM;
1303 
1304 	port = snd_seq_create_port(client, (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info->addr.port : -1);
1305 	if (port == NULL)
1306 		return -ENOMEM;
1307 
1308 	if (client->type == USER_CLIENT && info->kernel) {
1309 		port_idx = port->addr.port;
1310 		snd_seq_port_unlock(port);
1311 		snd_seq_delete_port(client, port_idx);
1312 		return -EINVAL;
1313 	}
1314 	if (client->type == KERNEL_CLIENT) {
1315 		if ((callback = info->kernel) != NULL) {
1316 			if (callback->owner)
1317 				port->owner = callback->owner;
1318 			port->private_data = callback->private_data;
1319 			port->private_free = callback->private_free;
1320 			port->event_input = callback->event_input;
1321 			port->c_src.open = callback->subscribe;
1322 			port->c_src.close = callback->unsubscribe;
1323 			port->c_dest.open = callback->use;
1324 			port->c_dest.close = callback->unuse;
1325 		}
1326 	}
1327 
1328 	info->addr = port->addr;
1329 
1330 	snd_seq_set_port_info(port, info);
1331 	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
1332 	snd_seq_port_unlock(port);
1333 
1334 	return 0;
1335 }
1336 
1337 /*
1338  * DELETE PORT ioctl()
1339  */
1340 static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg)
1341 {
1342 	struct snd_seq_port_info *info = arg;
1343 	int err;
1344 
1345 	/* it is not allowed to remove a port for another client */
1346 	if (info->addr.client != client->number)
1347 		return -EPERM;
1348 
1349 	err = snd_seq_delete_port(client, info->addr.port);
1350 	if (err >= 0)
1351 		snd_seq_system_client_ev_port_exit(client->number, info->addr.port);
1352 	return err;
1353 }
1354 
1355 
1356 /*
1357  * GET_PORT_INFO ioctl() (on any client)
1358  */
1359 static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
1360 {
1361 	struct snd_seq_port_info *info = arg;
1362 	struct snd_seq_client *cptr;
1363 	struct snd_seq_client_port *port;
1364 
1365 	cptr = snd_seq_client_use_ptr(info->addr.client);
1366 	if (cptr == NULL)
1367 		return -ENXIO;
1368 
1369 	port = snd_seq_port_use_ptr(cptr, info->addr.port);
1370 	if (port == NULL) {
1371 		snd_seq_client_unlock(cptr);
1372 		return -ENOENT;			/* don't change */
1373 	}
1374 
1375 	/* get port info */
1376 	snd_seq_get_port_info(port, info);
1377 	snd_seq_port_unlock(port);
1378 	snd_seq_client_unlock(cptr);
1379 
1380 	return 0;
1381 }
1382 
1383 
1384 /*
1385  * SET_PORT_INFO ioctl() (only ports on this/own client)
1386  */
1387 static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
1388 {
1389 	struct snd_seq_port_info *info = arg;
1390 	struct snd_seq_client_port *port;
1391 
1392 	if (info->addr.client != client->number) /* only set our own ports ! */
1393 		return -EPERM;
1394 	port = snd_seq_port_use_ptr(client, info->addr.port);
1395 	if (port) {
1396 		snd_seq_set_port_info(port, info);
1397 		snd_seq_port_unlock(port);
1398 	}
1399 	return 0;
1400 }
1401 
1402 
1403 /*
1404  * port subscription (connection)
1405  */
1406 #define PERM_RD		(SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
1407 #define PERM_WR		(SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)
1408 
1409 static int check_subscription_permission(struct snd_seq_client *client,
1410 					 struct snd_seq_client_port *sport,
1411 					 struct snd_seq_client_port *dport,
1412 					 struct snd_seq_port_subscribe *subs)
1413 {
1414 	if (client->number != subs->sender.client &&
1415 	    client->number != subs->dest.client) {
1416 		/* connection by third client - check export permission */
1417 		if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
1418 			return -EPERM;
1419 		if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
1420 			return -EPERM;
1421 	}
1422 
1423 	/* check read permission */
1424 	/* if sender or receiver is the subscribing client itself,
1425 	 * no permission check is necessary
1426 	 */
1427 	if (client->number != subs->sender.client) {
1428 		if (! check_port_perm(sport, PERM_RD))
1429 			return -EPERM;
1430 	}
1431 	/* check write permission */
1432 	if (client->number != subs->dest.client) {
1433 		if (! check_port_perm(dport, PERM_WR))
1434 			return -EPERM;
1435 	}
1436 	return 0;
1437 }
1438 
1439 /*
1440  * send a subscription notify event to the user client:
1441  * client must be user client.
1442  */
1443 int snd_seq_client_notify_subscription(int client, int port,
1444 				       struct snd_seq_port_subscribe *info,
1445 				       int evtype)
1446 {
1447 	struct snd_seq_event event;
1448 
1449 	memset(&event, 0, sizeof(event));
1450 	event.type = evtype;
1451 	event.data.connect.dest = info->dest;
1452 	event.data.connect.sender = info->sender;
1453 
1454 	return snd_seq_system_notify(client, port, &event);  /* non-atomic */
1455 }
1456 
1457 
1458 /*
1459  * add to port's subscription list IOCTL interface
1460  */
1461 static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
1462 					void *arg)
1463 {
1464 	struct snd_seq_port_subscribe *subs = arg;
1465 	int result = -EINVAL;
1466 	struct snd_seq_client *receiver = NULL, *sender = NULL;
1467 	struct snd_seq_client_port *sport = NULL, *dport = NULL;
1468 
1469 	if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
1470 		goto __end;
1471 	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
1472 		goto __end;
1473 	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
1474 		goto __end;
1475 	if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
1476 		goto __end;
1477 
1478 	result = check_subscription_permission(client, sport, dport, subs);
1479 	if (result < 0)
1480 		goto __end;
1481 
1482 	/* connect them */
1483 	result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs);
1484 	if (! result) /* broadcast announce */
1485 		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
1486 						   subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
1487       __end:
1488       	if (sport)
1489 		snd_seq_port_unlock(sport);
1490 	if (dport)
1491 		snd_seq_port_unlock(dport);
1492 	if (sender)
1493 		snd_seq_client_unlock(sender);
1494 	if (receiver)
1495 		snd_seq_client_unlock(receiver);
1496 	return result;
1497 }
1498 
1499 
1500 /*
1501  * remove from port's subscription list
1502  */
1503 static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
1504 					  void *arg)
1505 {
1506 	struct snd_seq_port_subscribe *subs = arg;
1507 	int result = -ENXIO;
1508 	struct snd_seq_client *receiver = NULL, *sender = NULL;
1509 	struct snd_seq_client_port *sport = NULL, *dport = NULL;
1510 
1511 	if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
1512 		goto __end;
1513 	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
1514 		goto __end;
1515 	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
1516 		goto __end;
1517 	if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
1518 		goto __end;
1519 
1520 	result = check_subscription_permission(client, sport, dport, subs);
1521 	if (result < 0)
1522 		goto __end;
1523 
1524 	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs);
1525 	if (! result) /* broadcast announce */
1526 		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
1527 						   subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
1528       __end:
1529       	if (sport)
1530 		snd_seq_port_unlock(sport);
1531 	if (dport)
1532 		snd_seq_port_unlock(dport);
1533 	if (sender)
1534 		snd_seq_client_unlock(sender);
1535 	if (receiver)
1536 		snd_seq_client_unlock(receiver);
1537 	return result;
1538 }
1539 
1540 
1541 /* CREATE_QUEUE ioctl() */
1542 static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
1543 {
1544 	struct snd_seq_queue_info *info = arg;
1545 	struct snd_seq_queue *q;
1546 
1547 	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
1548 	if (IS_ERR(q))
1549 		return PTR_ERR(q);
1550 
1551 	info->queue = q->queue;
1552 	info->locked = q->locked;
1553 	info->owner = q->owner;
1554 
1555 	/* set queue name */
1556 	if (!info->name[0])
1557 		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
1558 	strscpy(q->name, info->name, sizeof(q->name));
1559 	snd_use_lock_free(&q->use_lock);
1560 
1561 	return 0;
1562 }
1563 
1564 /* DELETE_QUEUE ioctl() */
1565 static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg)
1566 {
1567 	struct snd_seq_queue_info *info = arg;
1568 
1569 	return snd_seq_queue_delete(client->number, info->queue);
1570 }
1571 
1572 /* GET_QUEUE_INFO ioctl() */
1573 static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
1574 					void *arg)
1575 {
1576 	struct snd_seq_queue_info *info = arg;
1577 	struct snd_seq_queue *q;
1578 
1579 	q = queueptr(info->queue);
1580 	if (q == NULL)
1581 		return -EINVAL;
1582 
1583 	memset(info, 0, sizeof(*info));
1584 	info->queue = q->queue;
1585 	info->owner = q->owner;
1586 	info->locked = q->locked;
1587 	strscpy(info->name, q->name, sizeof(info->name));
1588 	queuefree(q);
1589 
1590 	return 0;
1591 }
1592 
1593 /* SET_QUEUE_INFO ioctl() */
1594 static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
1595 					void *arg)
1596 {
1597 	struct snd_seq_queue_info *info = arg;
1598 	struct snd_seq_queue *q;
1599 
1600 	if (info->owner != client->number)
1601 		return -EINVAL;
1602 
1603 	/* change owner/locked permission */
1604 	if (snd_seq_queue_check_access(info->queue, client->number)) {
1605 		if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
1606 			return -EPERM;
1607 		if (info->locked)
1608 			snd_seq_queue_use(info->queue, client->number, 1);
1609 	} else {
1610 		return -EPERM;
1611 	}
1612 
1613 	q = queueptr(info->queue);
1614 	if (! q)
1615 		return -EINVAL;
1616 	if (q->owner != client->number) {
1617 		queuefree(q);
1618 		return -EPERM;
1619 	}
1620 	strscpy(q->name, info->name, sizeof(q->name));
1621 	queuefree(q);
1622 
1623 	return 0;
1624 }
1625 
1626 /* GET_NAMED_QUEUE ioctl() */
1627 static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client,
1628 					 void *arg)
1629 {
1630 	struct snd_seq_queue_info *info = arg;
1631 	struct snd_seq_queue *q;
1632 
1633 	q = snd_seq_queue_find_name(info->name);
1634 	if (q == NULL)
1635 		return -EINVAL;
1636 	info->queue = q->queue;
1637 	info->owner = q->owner;
1638 	info->locked = q->locked;
1639 	queuefree(q);
1640 
1641 	return 0;
1642 }
1643 
1644 /* GET_QUEUE_STATUS ioctl() */
1645 static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
1646 					  void *arg)
1647 {
1648 	struct snd_seq_queue_status *status = arg;
1649 	struct snd_seq_queue *queue;
1650 	struct snd_seq_timer *tmr;
1651 
1652 	queue = queueptr(status->queue);
1653 	if (queue == NULL)
1654 		return -EINVAL;
1655 	memset(status, 0, sizeof(*status));
1656 	status->queue = queue->queue;
1657 
1658 	tmr = queue->timer;
1659 	status->events = queue->tickq->cells + queue->timeq->cells;
1660 
1661 	status->time = snd_seq_timer_get_cur_time(tmr, true);
1662 	status->tick = snd_seq_timer_get_cur_tick(tmr);
1663 
1664 	status->running = tmr->running;
1665 
1666 	status->flags = queue->flags;
1667 	queuefree(queue);
1668 
1669 	return 0;
1670 }
1671 
1672 
1673 /* GET_QUEUE_TEMPO ioctl() */
1674 static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
1675 					 void *arg)
1676 {
1677 	struct snd_seq_queue_tempo *tempo = arg;
1678 	struct snd_seq_queue *queue;
1679 	struct snd_seq_timer *tmr;
1680 
1681 	queue = queueptr(tempo->queue);
1682 	if (queue == NULL)
1683 		return -EINVAL;
1684 	memset(tempo, 0, sizeof(*tempo));
1685 	tempo->queue = queue->queue;
1686 
1687 	tmr = queue->timer;
1688 
1689 	tempo->tempo = tmr->tempo;
1690 	tempo->ppq = tmr->ppq;
1691 	tempo->skew_value = tmr->skew;
1692 	tempo->skew_base = tmr->skew_base;
1693 	queuefree(queue);
1694 
1695 	return 0;
1696 }
1697 
1698 
1699 /* SET_QUEUE_TEMPO ioctl() */
1700 int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
1701 {
1702 	if (!snd_seq_queue_check_access(tempo->queue, client))
1703 		return -EPERM;
1704 	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
1705 }
1706 EXPORT_SYMBOL(snd_seq_set_queue_tempo);
1707 
1708 static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
1709 					 void *arg)
1710 {
1711 	struct snd_seq_queue_tempo *tempo = arg;
1712 	int result;
1713 
1714 	result = snd_seq_set_queue_tempo(client->number, tempo);
1715 	return result < 0 ? result : 0;
1716 }
1717 
1718 
1719 /* GET_QUEUE_TIMER ioctl() */
1720 static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
1721 					 void *arg)
1722 {
1723 	struct snd_seq_queue_timer *timer = arg;
1724 	struct snd_seq_queue *queue;
1725 	struct snd_seq_timer *tmr;
1726 
1727 	queue = queueptr(timer->queue);
1728 	if (queue == NULL)
1729 		return -EINVAL;
1730 
1731 	mutex_lock(&queue->timer_mutex);
1732 	tmr = queue->timer;
1733 	memset(timer, 0, sizeof(*timer));
1734 	timer->queue = queue->queue;
1735 
1736 	timer->type = tmr->type;
1737 	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
1738 		timer->u.alsa.id = tmr->alsa_id;
1739 		timer->u.alsa.resolution = tmr->preferred_resolution;
1740 	}
1741 	mutex_unlock(&queue->timer_mutex);
1742 	queuefree(queue);
1743 
1744 	return 0;
1745 }
1746 
1747 
1748 /* SET_QUEUE_TIMER ioctl() */
snd_seq_ioctl_set_queue_timer(struct snd_seq_client * client,void * arg)1749 static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
1750 					 void *arg)
1751 {
1752 	struct snd_seq_queue_timer *timer = arg;
1753 	int result = 0;
1754 
1755 	if (timer->type != SNDRV_SEQ_TIMER_ALSA)
1756 		return -EINVAL;
1757 
1758 	if (snd_seq_queue_check_access(timer->queue, client->number)) {
1759 		struct snd_seq_queue *q;
1760 		struct snd_seq_timer *tmr;
1761 
1762 		q = queueptr(timer->queue);
1763 		if (q == NULL)
1764 			return -ENXIO;
1765 		mutex_lock(&q->timer_mutex);
1766 		tmr = q->timer;
1767 		snd_seq_queue_timer_close(timer->queue);
1768 		tmr->type = timer->type;
1769 		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
1770 			tmr->alsa_id = timer->u.alsa.id;
1771 			tmr->preferred_resolution = timer->u.alsa.resolution;
1772 		}
1773 		result = snd_seq_queue_timer_open(timer->queue);
1774 		mutex_unlock(&q->timer_mutex);
1775 		queuefree(q);
1776 	} else {
1777 		return -EPERM;
1778 	}
1779 
1780 	return result;
1781 }
1782 
1783 
1784 /* GET_QUEUE_CLIENT ioctl() */
snd_seq_ioctl_get_queue_client(struct snd_seq_client * client,void * arg)1785 static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
1786 					  void *arg)
1787 {
1788 	struct snd_seq_queue_client *info = arg;
1789 	int used;
1790 
1791 	used = snd_seq_queue_is_used(info->queue, client->number);
1792 	if (used < 0)
1793 		return -EINVAL;
1794 	info->used = used;
1795 	info->client = client->number;
1796 
1797 	return 0;
1798 }
1799 
1800 
1801 /* SET_QUEUE_CLIENT ioctl() */
snd_seq_ioctl_set_queue_client(struct snd_seq_client * client,void * arg)1802 static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
1803 					  void *arg)
1804 {
1805 	struct snd_seq_queue_client *info = arg;
1806 	int err;
1807 
1808 	if (info->used >= 0) {
1809 		err = snd_seq_queue_use(info->queue, client->number, info->used);
1810 		if (err < 0)
1811 			return err;
1812 	}
1813 
1814 	return snd_seq_ioctl_get_queue_client(client, arg);
1815 }
1816 
1817 
1818 /* GET_CLIENT_POOL ioctl() */
snd_seq_ioctl_get_client_pool(struct snd_seq_client * client,void * arg)1819 static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
1820 					 void *arg)
1821 {
1822 	struct snd_seq_client_pool *info = arg;
1823 	struct snd_seq_client *cptr;
1824 
1825 	cptr = snd_seq_client_use_ptr(info->client);
1826 	if (cptr == NULL)
1827 		return -ENOENT;
1828 	memset(info, 0, sizeof(*info));
1829 	info->client = cptr->number;
1830 	info->output_pool = cptr->pool->size;
1831 	info->output_room = cptr->pool->room;
1832 	info->output_free = info->output_pool;
1833 	info->output_free = snd_seq_unused_cells(cptr->pool);
1834 	if (cptr->type == USER_CLIENT) {
1835 		info->input_pool = cptr->data.user.fifo_pool_size;
1836 		info->input_free = info->input_pool;
1837 		info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
1838 	} else {
1839 		info->input_pool = 0;
1840 		info->input_free = 0;
1841 	}
1842 	snd_seq_client_unlock(cptr);
1843 
1844 	return 0;
1845 }
1846 
1847 /* SET_CLIENT_POOL ioctl() */
snd_seq_ioctl_set_client_pool(struct snd_seq_client * client,void * arg)1848 static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
1849 					 void *arg)
1850 {
1851 	struct snd_seq_client_pool *info = arg;
1852 	int rc;
1853 
1854 	if (client->number != info->client)
1855 		return -EINVAL; /* can't change other clients */
1856 
1857 	if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS &&
1858 	    (! snd_seq_write_pool_allocated(client) ||
1859 	     info->output_pool != client->pool->size)) {
1860 		if (snd_seq_write_pool_allocated(client)) {
1861 			/* is the pool in use? */
1862 			if (atomic_read(&client->pool->counter))
1863 				return -EBUSY;
1864 			/* remove all existing cells */
1865 			snd_seq_pool_mark_closing(client->pool);
1866 			snd_seq_pool_done(client->pool);
1867 		}
1868 		client->pool->size = info->output_pool;
1869 		rc = snd_seq_pool_init(client->pool);
1870 		if (rc < 0)
1871 			return rc;
1872 	}
1873 	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
1874 	    info->input_pool >= 1 &&
1875 	    info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
1876 	    info->input_pool != client->data.user.fifo_pool_size) {
1877 		/* change pool size */
1878 		rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
1879 		if (rc < 0)
1880 			return rc;
1881 		client->data.user.fifo_pool_size = info->input_pool;
1882 	}
1883 	if (info->output_room >= 1 &&
1884 	    info->output_room <= client->pool->size) {
1885 		client->pool->room  = info->output_room;
1886 	}
1887 
1888 	return snd_seq_ioctl_get_client_pool(client, arg);
1889 }
1890 
1891 
1892 /* REMOVE_EVENTS ioctl() */
snd_seq_ioctl_remove_events(struct snd_seq_client * client,void * arg)1893 static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
1894 				       void *arg)
1895 {
1896 	struct snd_seq_remove_events *info = arg;
1897 
	/*
	 * Removing events from the input queue is mostly not implemented
	 * yet (XXX).
	 */
	if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions are applied, so for a user client we can
		 * simply clear the whole fifo.
		 */
		if (client->type == USER_CLIENT && client->data.user.fifo)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, info);

	return 0;
}


/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
					       subs);
 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);

	return result;
}


/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_query_subs *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if ((cptr = snd_seq_client_use_ptr(subs->root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs->root.port)) == NULL)
		goto __end;

	switch (subs->type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs->num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs->index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs->addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs->addr = s->info.sender;
			}
			subs->flags = s->info.flags;
			subs->queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

 __end:
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);

	return result;
}


/*
 * query next client
 */
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void *arg)
{
	struct snd_seq_client_info *info = arg;
	struct snd_seq_client *cptr = NULL;

	/* search for next client */
	if (info->client < INT_MAX)
		info->client++;
	if (info->client < 0)
		info->client = 0;
	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
		cptr = snd_seq_client_use_ptr(info->client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, info);
	snd_seq_client_unlock(cptr);

	return 0;
}

/*
 * query next port
 */
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;

	cptr = snd_seq_client_use_ptr(info->addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info->addr.port++;
	port = snd_seq_port_query_nearest(cptr, info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info->addr = port->addr;
	snd_seq_get_port_info(port, info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	return 0;
}
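
/*
 * Both QUERY_NEXT ioctls implement enumeration: the caller bumps the
 * client (or port) number and gets back the next existing entry, until
 * -ENOENT is returned.  A rough sketch of the usual loop, using the
 * kernel-side helper defined later in this file (illustrative only;
 * my_client stands for the caller's own client number):
 *
 *	struct snd_seq_client_info cinfo;
 *
 *	memset(&cinfo, 0, sizeof(cinfo));
 *	cinfo.client = -1;
 *	while (snd_seq_kernel_client_ctl(my_client,
 *			SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, &cinfo) >= 0) {
 *		(use cinfo.client, cinfo.name, ... here)
 *	}
 */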

/* -------------------------------------------------------- */

static const struct ioctl_handler {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void *arg);
} ioctl_handlers[] = {
	{ SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion },
	{ SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id },
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};

static long snd_seq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;
	/* To use kernel stack for ioctl data. */
	union {
		int pversion;
		int client_id;
		struct snd_seq_system_info	system_info;
		struct snd_seq_running_info	running_info;
		struct snd_seq_client_info	client_info;
		struct snd_seq_port_info	port_info;
		struct snd_seq_port_subscribe	port_subscribe;
		struct snd_seq_queue_info	queue_info;
		struct snd_seq_queue_status	queue_status;
		struct snd_seq_queue_tempo	tempo;
		struct snd_seq_queue_timer	queue_timer;
		struct snd_seq_queue_client	queue_client;
		struct snd_seq_client_pool	client_pool;
		struct snd_seq_remove_events	remove_events;
		struct snd_seq_query_subs	query_subs;
	} buf;
	const struct ioctl_handler *handler;
	unsigned long size;
	int err;

	if (snd_BUG_ON(!client))
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			break;
	}
	if (handler->cmd == 0)
		return -ENOTTY;

	memset(&buf, 0, sizeof(buf));

	/*
	 * All ioctl commands for the ALSA sequencer take an argument whose
	 * size fits within 13 bits, so we can safely pick up the size from
	 * the command itself.
	 */
	size = _IOC_SIZE(handler->cmd);
	if (handler->cmd & IOC_IN) {
		if (copy_from_user(&buf, (const void __user *)arg, size))
			return -EFAULT;
	}

	mutex_lock(&client->ioctl_mutex);
	err = handler->func(client, &buf);
	mutex_unlock(&client->ioctl_mutex);
	if (err >= 0) {
		/*
		 * Some commands have a wrong 'dir' in their ioctl definition
		 * (IOC_OUT is missing), but their results still need to be
		 * copied back to user space.
		 */
		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
		    handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
		    (handler->cmd & IOC_OUT))
			if (copy_to_user((void __user *)arg, &buf, size))
				return -EFAULT;
	}

	return err;
}
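
/*
 * For reference (not used at runtime): every sequencer ioctl encodes its
 * argument struct in the command number itself, so _IOC_SIZE(cmd) above
 * always matches the corresponding member of the union, e.g.
 * _IOC_SIZE(SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS) ==
 *	sizeof(struct snd_seq_queue_status).
 */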

#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif

/* -------------------------------------------------------- */


/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{
	struct snd_seq_client *client;
	va_list args;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
		return -EINVAL;
	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
		return -EINVAL;

	mutex_lock(&register_mutex);

	if (card) {
		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
			client_index = -1;
	}

	/* empty write queue as default */
	client = seq_create_client1(client_index, 0);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -EBUSY;	/* failure code */
	}
	usage_alloc(&client_usage, 1);

	client->accept_input = 1;
	client->accept_output = 1;
	client->data.kernel.card = card;

	va_start(args, name_fmt);
	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
	va_end(args);

	client->type = KERNEL_CLIENT;
	mutex_unlock(&register_mutex);

	/* make others aware of this new client */
	snd_seq_system_client_ev_client_start(client->number);

	/* return the client number to the caller */
	return client->number;
}
EXPORT_SYMBOL(snd_seq_create_kernel_client);
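
/*
 * A rough usage sketch (illustrative only; the names are made up): a card
 * driver typically creates its client from probe code and then adds
 * ports, e.g. through SNDRV_SEQ_IOCTL_CREATE_PORT:
 *
 *	int my_client;
 *	struct snd_seq_port_info pinfo;
 *
 *	my_client = snd_seq_create_kernel_client(card, 0, "My MIDI %d",
 *						 card->number);
 *	if (my_client < 0)
 *		return my_client;
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.addr.client = my_client;
 *	strscpy(pinfo.name, "My MIDI Port", sizeof(pinfo.name));
 *	pinfo.capability = SNDRV_SEQ_PORT_CAP_READ |
 *			   SNDRV_SEQ_PORT_CAP_SUBS_READ;
 *	pinfo.type = SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC;
 *	snd_seq_kernel_client_ctl(my_client, SNDRV_SEQ_IOCTL_CREATE_PORT,
 *				  &pinfo);
 *
 * The client is destroyed again with snd_seq_delete_kernel_client().
 */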

/* exported to kernel modules */
int snd_seq_delete_kernel_client(int client)
{
	struct snd_seq_client *ptr;

	if (snd_BUG_ON(in_interrupt()))
		return -EBUSY;

	ptr = clientptr(client);
	if (ptr == NULL)
		return -EINVAL;

	seq_free_client(ptr);
	kfree(ptr);
	return 0;
}
EXPORT_SYMBOL(snd_seq_delete_kernel_client);

/*
 * exported, called by kernel clients to enqueue events (optionally
 * blocking, depending on the 'blocking' argument)
 *
 * RETURN VALUE: zero on success, negative error code on failure
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
				  struct file *file, bool blocking)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	if (ev->type == SNDRV_SEQ_EVENT_NONE)
		return 0; /* ignore this */
	if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
		return -EINVAL; /* quoted events can't be enqueued */

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output) {
		result = -EPERM;
	} else { /* send it */
		mutex_lock(&cptr->ioctl_mutex);
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
						      false, 0,
						      &cptr->ioctl_mutex);
		mutex_unlock(&cptr->ioctl_mutex);
	}

	snd_seq_client_unlock(cptr);
	return result;
}
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);

/*
 * exported, called by kernel clients to dispatch events directly to other
 * clients, bypassing the queues.  The event time-stamp will be updated.
 *
 * RETURN VALUE: negative on delivery failure; otherwise the number of
 *		 delivered events (zero or positive)
 */
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event *ev,
				   int atomic, int hop)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	/* fill in client number */
	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = snd_seq_client_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output)
		result = -EPERM;
	else
		result = snd_seq_deliver_event(cptr, ev, atomic, hop);

	snd_seq_client_unlock(cptr);
	return result;
}
EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
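
/*
 * A rough sketch of how a kernel client sends an event with the two
 * helpers above (illustrative only; my_client and my_port are
 * placeholders):
 *
 *	struct snd_seq_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.type = SNDRV_SEQ_EVENT_NOTEON;
 *	ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
 *	ev.source.port = my_port;
 *	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
 *	ev.data.note.channel = 0;
 *	ev.data.note.note = 60;
 *	ev.data.note.velocity = 100;
 *	snd_seq_kernel_client_dispatch(my_client, &ev, 0, 0);
 *
 * Dispatching forces direct delivery (ev.queue is overwritten with
 * SNDRV_SEQ_QUEUE_DIRECT above), while snd_seq_kernel_client_enqueue()
 * leaves ev.queue and the time stamp alone so the event can be scheduled
 * on a timer queue instead.
 */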

/**
 * snd_seq_kernel_client_ctl - run a sequencer command for a client with
 *			       data in kernel space
 * @clientid:	A numerical ID for a client.
 * @cmd:	An ioctl(2) command for ALSA sequencer operation.
 * @arg:	A pointer to data in kernel space.
 *
 * Despite its name, this kernel API handles both kernel and application
 * clients.  The 'arg' pointer must refer to kernel space.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
{
	const struct ioctl_handler *handler;
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			return handler->func(client, arg);
	}

	pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
		 cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
	return -ENOTTY;
}
EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
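
/*
 * For instance (illustrative sketch), a kernel client can create a queue
 * for itself without going through the file interface:
 *
 *	struct snd_seq_queue_info qinfo;
 *	int err;
 *
 *	memset(&qinfo, 0, sizeof(qinfo));
 *	strscpy(qinfo.name, "my queue", sizeof(qinfo.name));
 *	err = snd_seq_kernel_client_ctl(my_client,
 *					SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qinfo);
 *
 * On success qinfo.queue holds the number of the newly created queue.
 * Unlike snd_seq_ioctl(), no copy_{from,to}_user() is involved; the
 * handler works directly on the kernel-space buffer.
 */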

/* exported (for OSS emulator) */
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
{
	struct snd_seq_client *client;

	client = clientptr(clientid);
	if (client == NULL)
		return -ENXIO;

	if (! snd_seq_write_pool_allocated(client))
		return 1;
	if (snd_seq_pool_poll_wait(client->pool, file, wait))
		return 1;
	return 0;
}
EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);

/*---------------------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/*
 *  /proc interface
 */
static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
					  struct snd_seq_port_subs_info *group,
					  int is_src, char *msg)
{
	struct list_head *p;
	struct snd_seq_subscribers *s;
	int count = 0;

	down_read(&group->list_mutex);
	if (list_empty(&group->list_head)) {
		up_read(&group->list_mutex);
		return;
	}
	snd_iprintf(buffer, msg);
	list_for_each(p, &group->list_head) {
		if (is_src)
			s = list_entry(p, struct snd_seq_subscribers, src_list);
		else
			s = list_entry(p, struct snd_seq_subscribers, dest_list);
		if (count++)
			snd_iprintf(buffer, ", ");
		snd_iprintf(buffer, "%d:%d",
			    is_src ? s->info.dest.client : s->info.sender.client,
			    is_src ? s->info.dest.port : s->info.sender.port);
		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
		if (group->exclusive)
			snd_iprintf(buffer, "[ex]");
	}
	up_read(&group->list_mutex);
	snd_iprintf(buffer, "\n");
}

#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')

#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')

static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
				    struct snd_seq_client *client)
{
	struct snd_seq_client_port *p;

	mutex_lock(&client->ports_mutex);
	list_for_each_entry(p, &client->ports_list_head, list) {
		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c)\n",
			    p->addr.port, p->name,
			    FLAG_PERM_RD(p->capability),
			    FLAG_PERM_WR(p->capability),
			    FLAG_PERM_EX(p->capability),
			    FLAG_PERM_DUPLEX(p->capability));
		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
	}
	mutex_unlock(&client->ports_mutex);
}


/* exported to seq_info.c */
void snd_seq_info_clients_read(struct snd_info_entry *entry,
			       struct snd_info_buffer *buffer)
{
	int c;
	struct snd_seq_client *client;

	snd_iprintf(buffer, "Client info\n");
	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
	snd_iprintf(buffer, "\n");

	/* list the client table */
	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
		client = snd_seq_client_use_ptr(c);
		if (client == NULL)
			continue;
		if (client->type == NO_CLIENT) {
			snd_seq_client_unlock(client);
			continue;
		}

		snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
			    c, client->name,
			    client->type == USER_CLIENT ? "User" : "Kernel");
		snd_seq_info_dump_ports(buffer, client);
		if (snd_seq_write_pool_allocated(client)) {
			snd_iprintf(buffer, "  Output pool :\n");
			snd_seq_info_pool(buffer, client->pool, "    ");
		}
		if (client->type == USER_CLIENT && client->data.user.fifo &&
		    client->data.user.fifo->pool) {
			snd_iprintf(buffer, "  Input pool :\n");
			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
		}
		snd_seq_client_unlock(client);
	}
}
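
/*
 * The resulting /proc/asound/seq/clients output looks roughly like this
 * (made-up sample; actual names and numbers depend on the system):
 *
 *	Client info
 *	  cur  clients : 3
 *	  peak clients : 4
 *	  max  clients : 192
 *
 *	Client 128 : "My Client" [User]
 *	  Port   0 : "My Port" (RWe-)
 *	    Connecting To: 14:0
 *	    Connected From: 14:0[r:0]
 *	  Output pool :
 *	    ...
 */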
#endif /* CONFIG_SND_PROC_FS */

/*---------------------------------------------------------------------------*/


/*
 *  REGISTRATION PART
 */

static const struct file_operations snd_seq_f_ops =
{
	.owner =	THIS_MODULE,
	.read =		snd_seq_read,
	.write =	snd_seq_write,
	.open =		snd_seq_open,
	.release =	snd_seq_release,
	.llseek =	no_llseek,
	.poll =		snd_seq_poll,
	.unlocked_ioctl =	snd_seq_ioctl,
	.compat_ioctl =	snd_seq_ioctl_compat,
};

static struct device seq_dev;

/*
 * register sequencer device
 */
int __init snd_sequencer_device_init(void)
{
	int err;

	snd_device_initialize(&seq_dev, NULL);
	dev_set_name(&seq_dev, "seq");

	mutex_lock(&register_mutex);
	err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
				  &snd_seq_f_ops, NULL, &seq_dev);
	mutex_unlock(&register_mutex);
	if (err < 0) {
		put_device(&seq_dev);
		return err;
	}

	return 0;
}



/*
 * unregister sequencer device
 */
void snd_sequencer_device_done(void)
{
	snd_unregister_device(&seq_dev);
	put_device(&seq_dev);
}