// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA sequencer FIFO
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

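/*
 * A fifo keeps a singly-linked list of event cells (head/tail) that are
 * allocated from its own pool.  Writers append cells with f->lock held,
 * readers sleep on input_sleep until a cell arrives, and use_lock keeps
 * the fifo and its pool alive while they are being accessed.
 */
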
/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

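/*
 * delete the fifo: the caller's pointer is cleared first so that no new
 * references are taken, then pending cells and the pool are released
 */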
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources */
	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

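	/* wait until all concurrent users of the fifo have released
	 * their use-lock references before draining */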
	snd_use_lock_sync(&f->use_lock);
	guard(spinlock_irq)(&f->lock);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

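	/* hold a use-lock so that the fifo cannot be released while
	 * the event is being duplicated and queued */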
	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cells to fifo */
	scoped_guard(spinlock_irqsave, &f->lock) {
		if (f->tail != NULL)
			f->tail->next = cell;
		f->tail = cell;
		if (f->head == NULL)
			f->head = cell;
		cell->next = NULL;
		f->cells++;
	}

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}

/* dequeue cell from fifo; caller must hold f->lock */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	cell = f->head;
	if (cell) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue a cell from the fifo for delivery to user space; block
 * until one is available unless nonblock is set */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
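		/* fifo is empty: register on the wait queue and drop the
		 * lock while sleeping so that writers can queue cells */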
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}

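/* return an unprocessed cell back to the head of the fifo */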
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	if (cell) {
		guard(spinlock_irqsave)(&f->lock);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
	}
}


/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}

/* change the size of the pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

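	/* swap in the new pool under the lock; the old cells remain
	 * chained at oldhead and are released below */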
	scoped_guard(spinlock_irq, &f->lock) {
		/* remember old pool */
		oldpool = f->pool;
		oldhead = f->head;
		/* exchange pools */
		f->pool = newpool;
		f->head = NULL;
		f->tail = NULL;
		f->cells = 0;
		/* NOTE: overflow flag is not cleared */
	}

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}

/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
	int cells;

	if (!f)
		return 0;

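	/* take a use-lock so that the pool cannot be swapped out by a
	 * concurrent snd_seq_fifo_resize() while it is inspected */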
	snd_use_lock_use(&f->use_lock);
	scoped_guard(spinlock_irqsave, &f->lock)
		cells = snd_seq_unused_cells(f->pool);
	snd_use_lock_free(&f->use_lock);
	return cells;
}