xref: /linux/sound/core/pcm_lib.c (revision 72c0f57d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Digital Audio (PCM) abstract layer
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  *                   Abramo Bagnara <abramo@alsa-project.org>
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
20 
21 #include "pcm_local.h"
22 
23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
24 #define CREATE_TRACE_POINTS
25 #include "pcm_trace.h"
26 #else
27 #define trace_hwptr(substream, pos, in_interrupt)
28 #define trace_xrun(substream)
29 #define trace_hw_ptr_error(substream, reason)
30 #define trace_applptr(substream, prev, curr)
31 #endif
32 
33 static int fill_silence_frames(struct snd_pcm_substream *substream,
34 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35 
36 
37 static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
38 				       snd_pcm_uframes_t ptr,
39 				       snd_pcm_uframes_t new_ptr)
40 {
41 	snd_pcm_sframes_t delta;
42 
43 	delta = new_ptr - ptr;
44 	if (delta == 0)
45 		return;
46 	if (delta < 0)
47 		delta += runtime->boundary;
48 	if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
49 		runtime->silence_filled -= delta;
50 	else
51 		runtime->silence_filled = 0;
52 	runtime->silence_start = new_ptr;
53 }
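/*
 * Worked example of the wrap handling above (illustration only, not part of
 * the original file): with boundary = 0x4000, ptr = 0x3f00 and
 * new_ptr = 0x0100, the raw delta is -0x3e00; adding the boundary gives an
 * effective advance of 0x0200 frames, by which silence_filled is reduced.
 */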
54 
55 /*
56  * fill ring buffer with silence
57  * runtime->silence_start: starting pointer to silence area
58  * runtime->silence_filled: size filled with silence
59  * runtime->silence_threshold: threshold from application
60  * runtime->silence_size: maximal size from application
61  *
62  * when runtime->silence_size >= runtime->boundary, fill the processed area with silence immediately
63  */
64 void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
65 {
66 	struct snd_pcm_runtime *runtime = substream->runtime;
67 	snd_pcm_uframes_t frames, ofs, transfer;
68 	int err;
69 
70 	if (runtime->silence_size < runtime->boundary) {
71 		snd_pcm_sframes_t noise_dist;
72 		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
73 		update_silence_vars(runtime, runtime->silence_start, appl_ptr);
74 		/* initialization outside pointer updates */
75 		if (new_hw_ptr == ULONG_MAX)
76 			new_hw_ptr = runtime->status->hw_ptr;
77 		/* get hw_avail with the boundary crossing */
78 		noise_dist = appl_ptr - new_hw_ptr;
79 		if (noise_dist < 0)
80 			noise_dist += runtime->boundary;
81 		/* total noise distance */
82 		noise_dist += runtime->silence_filled;
83 		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
84 			return;
85 		frames = runtime->silence_threshold - noise_dist;
86 		if (frames > runtime->silence_size)
87 			frames = runtime->silence_size;
88 	} else {
89 		/*
90 		 * This filling mode aims at free-running mode (used for example by dmix),
91 		 * which doesn't update the application pointer.
92 		 */
93 		snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
94 		if (new_hw_ptr == ULONG_MAX) {
95 			/*
96 			 * Initialization, fill the whole unused buffer with silence.
97 			 *
98 			 * Usually, this is entered while stopped, before data is queued,
99 			 * so both pointers are expected to be zero.
100 			 */
101 			snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
102 			if (avail < 0)
103 				avail += runtime->boundary;
104 			/*
105 			 * In free-running mode, appl_ptr will be zero even while running,
106 			 * so we end up with a huge number. There is no useful way to
107 			 * handle this, so we just clear the whole buffer.
108 			 */
109 			runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
110 			runtime->silence_start = hw_ptr;
111 		} else {
112 			/* Silence the just played area immediately */
113 			update_silence_vars(runtime, hw_ptr, new_hw_ptr);
114 		}
115 		/*
116 		 * In this mode, silence_filled actually includes the valid
117 		 * sample data from the user.
118 		 */
119 		frames = runtime->buffer_size - runtime->silence_filled;
120 	}
121 	if (snd_BUG_ON(frames > runtime->buffer_size))
122 		return;
123 	if (frames == 0)
124 		return;
125 	ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
126 	do {
127 		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
128 		err = fill_silence_frames(substream, ofs, transfer);
129 		snd_BUG_ON(err < 0);
130 		runtime->silence_filled += transfer;
131 		frames -= transfer;
132 		ofs = 0;
133 	} while (frames > 0);
134 	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
135 }
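/*
 * User-space view of the knobs above (a sketch against the alsa-lib API,
 * not kernel code; 'pcm' is a hypothetical snd_pcm_t handle): the
 * application picks silence_threshold/silence_size through sw_params, and
 * silence_size == boundary selects the free-running fill mode handled in
 * the else-branch above.
 */
#if 0	/* illustration only; would live in an application using <alsa/asoundlib.h> */
	snd_pcm_sw_params_t *sw;
	snd_pcm_uframes_t boundary;

	snd_pcm_sw_params_alloca(&sw);
	snd_pcm_sw_params_current(pcm, sw);
	snd_pcm_sw_params_get_boundary(sw, &boundary);
	/* dmix-style: keep everything behind the hw pointer silenced */
	snd_pcm_sw_params_set_silence_threshold(pcm, sw, 0);
	snd_pcm_sw_params_set_silence_size(pcm, sw, boundary);
	snd_pcm_sw_params(pcm, sw);
#endif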
136 
137 #ifdef CONFIG_SND_DEBUG
138 void snd_pcm_debug_name(struct snd_pcm_substream *substream,
139 			   char *name, size_t len)
140 {
141 	snprintf(name, len, "pcmC%dD%d%c:%d",
142 		 substream->pcm->card->number,
143 		 substream->pcm->device,
144 		 substream->stream ? 'c' : 'p',
145 		 substream->number);
146 }
147 EXPORT_SYMBOL(snd_pcm_debug_name);
148 #endif
149 
150 #define XRUN_DEBUG_BASIC	(1<<0)
151 #define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
152 #define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
153 
154 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
155 
156 #define xrun_debug(substream, mask) \
157 			((substream)->pstr->xrun_debug & (mask))
158 #else
159 #define xrun_debug(substream, mask)	0
160 #endif
161 
162 #define dump_stack_on_xrun(substream) do {			\
163 		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
164 			dump_stack();				\
165 	} while (0)
166 
167 /* call with stream lock held */
168 void __snd_pcm_xrun(struct snd_pcm_substream *substream)
169 {
170 	struct snd_pcm_runtime *runtime = substream->runtime;
171 
172 	trace_xrun(substream);
173 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
174 		struct timespec64 tstamp;
175 
176 		snd_pcm_gettime(runtime, &tstamp);
177 		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
178 		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
179 	}
180 	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
181 	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
182 		char name[16];
183 		snd_pcm_debug_name(substream, name, sizeof(name));
184 		pcm_warn(substream->pcm, "XRUN: %s\n", name);
185 		dump_stack_on_xrun(substream);
186 	}
187 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
188 	substream->xrun_counter++;
189 #endif
190 }
191 
192 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
193 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
194 	do {								\
195 		trace_hw_ptr_error(substream, reason);	\
196 		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
197 			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
198 					   (in_interrupt) ? 'Q' : 'P', ##args);	\
199 			dump_stack_on_xrun(substream);			\
200 		}							\
201 	} while (0)
202 
203 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
204 
205 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
206 
207 #endif
208 
209 int snd_pcm_update_state(struct snd_pcm_substream *substream,
210 			 struct snd_pcm_runtime *runtime)
211 {
212 	snd_pcm_uframes_t avail;
213 
214 	avail = snd_pcm_avail(substream);
215 	if (avail > runtime->avail_max)
216 		runtime->avail_max = avail;
217 	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
218 		if (avail >= runtime->buffer_size) {
219 			snd_pcm_drain_done(substream);
220 			return -EPIPE;
221 		}
222 	} else {
223 		if (avail >= runtime->stop_threshold) {
224 			__snd_pcm_xrun(substream);
225 			return -EPIPE;
226 		}
227 	}
228 	if (runtime->twake) {
229 		if (avail >= runtime->twake)
230 			wake_up(&runtime->tsleep);
231 	} else if (avail >= runtime->control->avail_min)
232 		wake_up(&runtime->sleep);
233 	return 0;
234 }
235 
236 static void update_audio_tstamp(struct snd_pcm_substream *substream,
237 				struct timespec64 *curr_tstamp,
238 				struct timespec64 *audio_tstamp)
239 {
240 	struct snd_pcm_runtime *runtime = substream->runtime;
241 	u64 audio_frames, audio_nsecs;
242 	struct timespec64 driver_tstamp;
243 
244 	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
245 		return;
246 
247 	if (!(substream->ops->get_time_info) ||
248 		(runtime->audio_tstamp_report.actual_type ==
249 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
250 
251 		/*
252 		 * provide audio timestamp derived from pointer position
253 		 * add delay only if requested
254 		 */
255 
256 		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
257 
258 		if (runtime->audio_tstamp_config.report_delay) {
259 			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
260 				audio_frames -=  runtime->delay;
261 			else
262 				audio_frames +=  runtime->delay;
263 		}
264 		audio_nsecs = div_u64(audio_frames * 1000000000LL,
265 				runtime->rate);
266 		*audio_tstamp = ns_to_timespec64(audio_nsecs);
267 	}
268 
269 	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
270 	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
271 		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
272 		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
273 		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
274 		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
275 	}
276 
277 
278 	/*
279 	 * re-take a driver timestamp to let apps detect if the reference tstamp
280 	 * read by low-level hardware was provided with a delay
281 	 */
282 	snd_pcm_gettime(substream->runtime, &driver_tstamp);
283 	runtime->driver_tstamp = driver_tstamp;
284 }
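/*
 * The default path above converts a frame count to nanoseconds. A minimal
 * sketch of the same math (not part of the original file), with a worked
 * number: 24000 frames at 48000 Hz give 500000000 ns, i.e. 0.5 s.
 */
static inline struct timespec64 example_frames_to_ts(u64 frames,
						     unsigned int rate)
{
	return ns_to_timespec64(div_u64(frames * 1000000000ULL, rate));
}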
285 
286 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
287 				  unsigned int in_interrupt)
288 {
289 	struct snd_pcm_runtime *runtime = substream->runtime;
290 	snd_pcm_uframes_t pos;
291 	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
292 	snd_pcm_sframes_t hdelta, delta;
293 	unsigned long jdelta;
294 	unsigned long curr_jiffies;
295 	struct timespec64 curr_tstamp;
296 	struct timespec64 audio_tstamp;
297 	int crossed_boundary = 0;
298 
299 	old_hw_ptr = runtime->status->hw_ptr;
300 
301 	/*
302 	 * group pointer, time and jiffies reads to allow for more
303 	 * accurate correlations/corrections.
304 	 * The values are stored at the end of this routine after
305 	 * corrections for hw_ptr position
306 	 */
307 	pos = substream->ops->pointer(substream);
308 	curr_jiffies = jiffies;
309 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
310 		if ((substream->ops->get_time_info) &&
311 			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
312 			substream->ops->get_time_info(substream, &curr_tstamp,
313 						&audio_tstamp,
314 						&runtime->audio_tstamp_config,
315 						&runtime->audio_tstamp_report);
316 
317 			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
318 			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
319 				snd_pcm_gettime(runtime, &curr_tstamp);
320 		} else
321 			snd_pcm_gettime(runtime, &curr_tstamp);
322 	}
323 
324 	if (pos == SNDRV_PCM_POS_XRUN) {
325 		__snd_pcm_xrun(substream);
326 		return -EPIPE;
327 	}
328 	if (pos >= runtime->buffer_size) {
329 		if (printk_ratelimit()) {
330 			char name[16];
331 			snd_pcm_debug_name(substream, name, sizeof(name));
332 			pcm_err(substream->pcm,
333 				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
334 				name, pos, runtime->buffer_size,
335 				runtime->period_size);
336 		}
337 		pos = 0;
338 	}
339 	pos -= pos % runtime->min_align;
340 	trace_hwptr(substream, pos, in_interrupt);
341 	hw_base = runtime->hw_ptr_base;
342 	new_hw_ptr = hw_base + pos;
343 	if (in_interrupt) {
344 		/* we know that one period was processed */
345 		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
346 		delta = runtime->hw_ptr_interrupt + runtime->period_size;
347 		if (delta > new_hw_ptr) {
348 			/* check for double acknowledged interrupts */
349 			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
350 			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
351 				hw_base += runtime->buffer_size;
352 				if (hw_base >= runtime->boundary) {
353 					hw_base = 0;
354 					crossed_boundary++;
355 				}
356 				new_hw_ptr = hw_base + pos;
357 				goto __delta;
358 			}
359 		}
360 	}
361 	/* new_hw_ptr might be lower than old_hw_ptr in case when */
362 	/* pointer crosses the end of the ring buffer */
363 	if (new_hw_ptr < old_hw_ptr) {
364 		hw_base += runtime->buffer_size;
365 		if (hw_base >= runtime->boundary) {
366 			hw_base = 0;
367 			crossed_boundary++;
368 		}
369 		new_hw_ptr = hw_base + pos;
370 	}
371       __delta:
372 	delta = new_hw_ptr - old_hw_ptr;
373 	if (delta < 0)
374 		delta += runtime->boundary;
375 
376 	if (runtime->no_period_wakeup) {
377 		snd_pcm_sframes_t xrun_threshold;
378 		/*
379 		 * Without regular period interrupts, we have to check
380 		 * the elapsed time to detect xruns.
381 		 */
382 		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
383 		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
384 			goto no_delta_check;
385 		hdelta = jdelta - delta * HZ / runtime->rate;
386 		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
387 		while (hdelta > xrun_threshold) {
388 			delta += runtime->buffer_size;
389 			hw_base += runtime->buffer_size;
390 			if (hw_base >= runtime->boundary) {
391 				hw_base = 0;
392 				crossed_boundary++;
393 			}
394 			new_hw_ptr = hw_base + pos;
395 			hdelta -= runtime->hw_ptr_buffer_jiffies;
396 		}
397 		goto no_delta_check;
398 	}
399 
400 	/* something must be really wrong */
401 	if (delta >= runtime->buffer_size + runtime->period_size) {
402 		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
403 			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
404 			     substream->stream, (long)pos,
405 			     (long)new_hw_ptr, (long)old_hw_ptr);
406 		return 0;
407 	}
408 
409 	/* Do jiffies check only in xrun_debug mode */
410 	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
411 		goto no_jiffies_check;
412 
413 	/* Skip the jiffies check for hardware with the BATCH flag.
414 	 * Such hardware usually just increases the position at each IRQ,
415 	 * thus it can't give any strange position.
416 	 */
417 	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
418 		goto no_jiffies_check;
419 	hdelta = delta;
420 	if (hdelta < runtime->delay)
421 		goto no_jiffies_check;
422 	hdelta -= runtime->delay;
423 	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
424 	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
425 		delta = jdelta /
426 			(((runtime->period_size * HZ) / runtime->rate)
427 								+ HZ/100);
428 		/* move new_hw_ptr according to jiffies, not the pos variable */
429 		new_hw_ptr = old_hw_ptr;
430 		hw_base = delta;
431 		/* use loop to avoid checks for delta overflows */
432 		/* the delta value is small or zero in most cases */
433 		while (delta > 0) {
434 			new_hw_ptr += runtime->period_size;
435 			if (new_hw_ptr >= runtime->boundary) {
436 				new_hw_ptr -= runtime->boundary;
437 				crossed_boundary--;
438 			}
439 			delta--;
440 		}
441 		/* align hw_base to buffer_size */
442 		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
443 			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
444 			     (long)pos, (long)hdelta,
445 			     (long)runtime->period_size, jdelta,
446 			     ((hdelta * HZ) / runtime->rate), hw_base,
447 			     (unsigned long)old_hw_ptr,
448 			     (unsigned long)new_hw_ptr);
449 		/* reset values to proper state */
450 		delta = 0;
451 		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
452 	}
453  no_jiffies_check:
454 	if (delta > runtime->period_size + runtime->period_size / 2) {
455 		hw_ptr_error(substream, in_interrupt,
456 			     "Lost interrupts?",
457 			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
458 			     substream->stream, (long)delta,
459 			     (long)new_hw_ptr,
460 			     (long)old_hw_ptr);
461 	}
462 
463  no_delta_check:
464 	if (runtime->status->hw_ptr == new_hw_ptr) {
465 		runtime->hw_ptr_jiffies = curr_jiffies;
466 		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
467 		return 0;
468 	}
469 
470 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
471 	    runtime->silence_size > 0)
472 		snd_pcm_playback_silence(substream, new_hw_ptr);
473 
474 	if (in_interrupt) {
475 		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
476 		if (delta < 0)
477 			delta += runtime->boundary;
478 		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
479 		runtime->hw_ptr_interrupt += delta;
480 		if (runtime->hw_ptr_interrupt >= runtime->boundary)
481 			runtime->hw_ptr_interrupt -= runtime->boundary;
482 	}
483 	runtime->hw_ptr_base = hw_base;
484 	runtime->status->hw_ptr = new_hw_ptr;
485 	runtime->hw_ptr_jiffies = curr_jiffies;
486 	if (crossed_boundary) {
487 		snd_BUG_ON(crossed_boundary != 1);
488 		runtime->hw_ptr_wrap += runtime->boundary;
489 	}
490 
491 	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
492 
493 	return snd_pcm_update_state(substream, runtime);
494 }
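/*
 * Sketch of the usual in_interrupt caller (hypothetical driver code, not
 * part of this file; needs <linux/interrupt.h>): a period interrupt handler
 * calls snd_pcm_period_elapsed(), which takes the stream lock and ends up
 * in snd_pcm_update_hw_ptr0() with in_interrupt = 1.
 */
static irqreturn_t example_pcm_irq(int irq, void *dev_id)
{
	struct snd_pcm_substream *substream = dev_id;

	/* acknowledge the device interrupt here (hardware specific) */
	snd_pcm_period_elapsed(substream);
	return IRQ_HANDLED;
}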
495 
496 /* CAUTION: call it with irq disabled */
497 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
498 {
499 	return snd_pcm_update_hw_ptr0(substream, 0);
500 }
501 
502 /**
503  * snd_pcm_set_ops - set the PCM operators
504  * @pcm: the pcm instance
505  * @direction: stream direction, SNDRV_PCM_STREAM_XXX
506  * @ops: the operator table
507  *
508  * Sets the given PCM operators to the pcm instance.
509  */
510 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
511 		     const struct snd_pcm_ops *ops)
512 {
513 	struct snd_pcm_str *stream = &pcm->streams[direction];
514 	struct snd_pcm_substream *substream;
515 
516 	for (substream = stream->substream; substream != NULL; substream = substream->next)
517 		substream->ops = ops;
518 }
519 EXPORT_SYMBOL(snd_pcm_set_ops);
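/*
 * Typical use (a sketch with hypothetical callbacks, not part of this
 * file): a driver fills a snd_pcm_ops table and attaches it to both
 * directions of a PCM created with snd_pcm_new().
 */
int example_pcm_open(struct snd_pcm_substream *substream);
int example_pcm_hw_params(struct snd_pcm_substream *substream,
			  struct snd_pcm_hw_params *params);
snd_pcm_uframes_t example_pcm_pointer(struct snd_pcm_substream *substream);

static const struct snd_pcm_ops example_pcm_ops = {
	.open      = example_pcm_open,
	.hw_params = example_pcm_hw_params,
	.pointer   = example_pcm_pointer,
};

static void example_attach_ops(struct snd_pcm *pcm)
{
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &example_pcm_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &example_pcm_ops);
}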
520 
521 /**
522  * snd_pcm_set_sync_per_card - set the PCM sync id with card number
523  * @substream: the pcm substream
524  * @params: modified hardware parameters
525  * @id: identifier (max 12 bytes)
526  * @len: identifier length (max 12 bytes)
527  *
528  * Sets the PCM sync identifier for the card with zero padding.
529  *
530  * User space (or any other user) should use this 16-byte identifier only for
531  * comparison, to check whether two IDs are identical or different. The special
532  * case of an identifier containing only zeros means empty (not set).
533  * The contents of the identifier should not be interpreted in any other way.
534  *
535  * The synchronization ID must be unique per clock source (usually one sound card,
536  * but multiple sound cards may share one PCM word clock source, which means that
537  * they are fully synchronized).
538  *
539  * This routine composes the ID from the card number in the first four bytes and
540  * a 12-byte additional ID. When another ID composition is used (e.g. for multiple
541  * sound cards), make sure that the composition does not clash with this
542  * composition scheme.
543  */
544 void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
545 			       struct snd_pcm_hw_params *params,
546 			       const unsigned char *id, unsigned int len)
547 {
548 	*(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
549 	len = min(12, len);
550 	memcpy(params->sync + 4, id, len);
551 	memset(params->sync + 4 + len, 0, 12 - len);
552 }
553 EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);
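/*
 * Sketch of a caller (hypothetical): a driver whose streams share one word
 * clock could derive the extra 12 bytes from that clock's name and call
 * this from its hw_params path.
 */
static void example_set_sync(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	static const unsigned char clk_id[] = "wclk0";	/* hypothetical */

	snd_pcm_set_sync_per_card(substream, params, clk_id, sizeof(clk_id));
}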
554 
555 /*
556  *  Standard ioctl routine
557  */
558 
559 static inline unsigned int div32(unsigned int a, unsigned int b,
560 				 unsigned int *r)
561 {
562 	if (b == 0) {
563 		*r = 0;
564 		return UINT_MAX;
565 	}
566 	*r = a % b;
567 	return a / b;
568 }
569 
570 static inline unsigned int div_down(unsigned int a, unsigned int b)
571 {
572 	if (b == 0)
573 		return UINT_MAX;
574 	return a / b;
575 }
576 
577 static inline unsigned int div_up(unsigned int a, unsigned int b)
578 {
579 	unsigned int r;
580 	unsigned int q;
581 	if (b == 0)
582 		return UINT_MAX;
583 	q = div32(a, b, &r);
584 	if (r)
585 		++q;
586 	return q;
587 }
588 
589 static inline unsigned int mul(unsigned int a, unsigned int b)
590 {
591 	if (a == 0)
592 		return 0;
593 	if (div_down(UINT_MAX, a) < b)
594 		return UINT_MAX;
595 	return a * b;
596 }
597 
598 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
599 				    unsigned int c, unsigned int *r)
600 {
601 	u_int64_t n = (u_int64_t) a * b;
602 	if (c == 0) {
603 		*r = 0;
604 		return UINT_MAX;
605 	}
606 	n = div_u64_rem(n, c, r);
607 	if (n >= UINT_MAX) {
608 		*r = 0;
609 		return UINT_MAX;
610 	}
611 	return n;
612 }
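/*
 * Behaviour of the helpers above, illustrated (not part of the original
 * file): they saturate instead of overflowing or faulting on zero divisors.
 *
 *	div_down(10, 4)             -> 2
 *	div_up(10, 4)               -> 3
 *	mul(0x10000, 0x10000)       -> UINT_MAX  (product would overflow)
 *	muldiv32(48000, 3, 0, &r)   -> UINT_MAX, r = 0  (division by zero)
 */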
613 
614 /**
615  * snd_interval_refine - refine the interval value of configurator
616  * @i: the interval value to refine
617  * @v: the interval value to refer to
618  *
619  * Refines the interval value with the reference value.
620  * The interval is changed to the range satisfying both intervals.
621  * The interval status (min, max, integer, etc.) are evaluated.
622  *
623  * Return: Positive if the value is changed, zero if it's not changed, or a
624  * negative error code.
625  */
626 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
627 {
628 	int changed = 0;
629 	if (snd_BUG_ON(snd_interval_empty(i)))
630 		return -EINVAL;
631 	if (i->min < v->min) {
632 		i->min = v->min;
633 		i->openmin = v->openmin;
634 		changed = 1;
635 	} else if (i->min == v->min && !i->openmin && v->openmin) {
636 		i->openmin = 1;
637 		changed = 1;
638 	}
639 	if (i->max > v->max) {
640 		i->max = v->max;
641 		i->openmax = v->openmax;
642 		changed = 1;
643 	} else if (i->max == v->max && !i->openmax && v->openmax) {
644 		i->openmax = 1;
645 		changed = 1;
646 	}
647 	if (!i->integer && v->integer) {
648 		i->integer = 1;
649 		changed = 1;
650 	}
651 	if (i->integer) {
652 		if (i->openmin) {
653 			i->min++;
654 			i->openmin = 0;
655 		}
656 		if (i->openmax) {
657 			i->max--;
658 			i->openmax = 0;
659 		}
660 	} else if (!i->openmin && !i->openmax && i->min == i->max)
661 		i->integer = 1;
662 	if (snd_interval_checkempty(i)) {
663 		snd_interval_none(i);
664 		return -EINVAL;
665 	}
666 	return changed;
667 }
668 EXPORT_SYMBOL(snd_interval_refine);
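/*
 * Worked example (illustration only): refining i = [8000, 48000] against
 * v = [44100, 192000] raises i->min to 44100, leaves i->max untouched and
 * returns 1 (changed), so i becomes [44100, 48000].
 */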
669 
670 static int snd_interval_refine_first(struct snd_interval *i)
671 {
672 	const unsigned int last_max = i->max;
673 
674 	if (snd_BUG_ON(snd_interval_empty(i)))
675 		return -EINVAL;
676 	if (snd_interval_single(i))
677 		return 0;
678 	i->max = i->min;
679 	if (i->openmin)
680 		i->max++;
681 	/* only exclude max value if also excluded before refine */
682 	i->openmax = (i->openmax && i->max >= last_max);
683 	return 1;
684 }
685 
686 static int snd_interval_refine_last(struct snd_interval *i)
687 {
688 	const unsigned int last_min = i->min;
689 
690 	if (snd_BUG_ON(snd_interval_empty(i)))
691 		return -EINVAL;
692 	if (snd_interval_single(i))
693 		return 0;
694 	i->min = i->max;
695 	if (i->openmax)
696 		i->min--;
697 	/* only exclude min value if also excluded before refine */
698 	i->openmin = (i->openmin && i->min <= last_min);
699 	return 1;
700 }
701 
702 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
703 {
704 	if (a->empty || b->empty) {
705 		snd_interval_none(c);
706 		return;
707 	}
708 	c->empty = 0;
709 	c->min = mul(a->min, b->min);
710 	c->openmin = (a->openmin || b->openmin);
711 	c->max = mul(a->max,  b->max);
712 	c->openmax = (a->openmax || b->openmax);
713 	c->integer = (a->integer && b->integer);
714 }
715 
716 /**
717  * snd_interval_div - refine the interval value with division
718  * @a: dividend
719  * @b: divisor
720  * @c: quotient
721  *
722  * c = a / b
723  *
724  * The result interval is stored in @c.
725  */
726 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
727 {
728 	unsigned int r;
729 	if (a->empty || b->empty) {
730 		snd_interval_none(c);
731 		return;
732 	}
733 	c->empty = 0;
734 	c->min = div32(a->min, b->max, &r);
735 	c->openmin = (r || a->openmin || b->openmax);
736 	if (b->min > 0) {
737 		c->max = div32(a->max, b->min, &r);
738 		if (r) {
739 			c->max++;
740 			c->openmax = 1;
741 		} else
742 			c->openmax = (a->openmax || b->openmin);
743 	} else {
744 		c->max = UINT_MAX;
745 		c->openmax = 0;
746 	}
747 	c->integer = 0;
748 }
749 
750 /**
751  * snd_interval_muldivk - refine the interval value
752  * @a: dividend 1
753  * @b: dividend 2
754  * @k: divisor (as integer)
755  * @c: result
756  *
757  * c = a * b / k
758  *
759  * The result interval is stored in @c.
760  */
761 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
762 		      unsigned int k, struct snd_interval *c)
763 {
764 	unsigned int r;
765 	if (a->empty || b->empty) {
766 		snd_interval_none(c);
767 		return;
768 	}
769 	c->empty = 0;
770 	c->min = muldiv32(a->min, b->min, k, &r);
771 	c->openmin = (r || a->openmin || b->openmin);
772 	c->max = muldiv32(a->max, b->max, k, &r);
773 	if (r) {
774 		c->max++;
775 		c->openmax = 1;
776 	} else
777 		c->openmax = (a->openmax || b->openmax);
778 	c->integer = 0;
779 }
780 
781 /**
782  * snd_interval_mulkdiv - refine the interval value
783  * @a: dividend 1
784  * @k: dividend 2 (as integer)
785  * @b: divisor
786  * @c: result
787  *
788  * c = a * k / b
789  *
790  * The result interval is stored in @c.
791  */
792 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
793 		      const struct snd_interval *b, struct snd_interval *c)
794 {
795 	unsigned int r;
796 	if (a->empty || b->empty) {
797 		snd_interval_none(c);
798 		return;
799 	}
800 	c->empty = 0;
801 	c->min = muldiv32(a->min, k, b->max, &r);
802 	c->openmin = (r || a->openmin || b->openmax);
803 	if (b->min > 0) {
804 		c->max = muldiv32(a->max, k, b->min, &r);
805 		if (r) {
806 			c->max++;
807 			c->openmax = 1;
808 		} else
809 			c->openmax = (a->openmax || b->openmin);
810 	} else {
811 		c->max = UINT_MAX;
812 		c->openmax = 0;
813 	}
814 	c->integer = 0;
815 }
816 
817 /* ---- */
818 
819 
820 /**
821  * snd_interval_ratnum - refine the interval value
822  * @i: interval to refine
823  * @rats_count: number of ratnum_t
824  * @rats: ratnum_t array
825  * @nump: pointer to store the resultant numerator
826  * @denp: pointer to store the resultant denominator
827  *
828  * Return: Positive if the value is changed, zero if it's not changed, or a
829  * negative error code.
830  */
831 int snd_interval_ratnum(struct snd_interval *i,
832 			unsigned int rats_count, const struct snd_ratnum *rats,
833 			unsigned int *nump, unsigned int *denp)
834 {
835 	unsigned int best_num, best_den;
836 	int best_diff;
837 	unsigned int k;
838 	struct snd_interval t;
839 	int err;
840 	unsigned int result_num, result_den;
841 	int result_diff;
842 
843 	best_num = best_den = best_diff = 0;
844 	for (k = 0; k < rats_count; ++k) {
845 		unsigned int num = rats[k].num;
846 		unsigned int den;
847 		unsigned int q = i->min;
848 		int diff;
849 		if (q == 0)
850 			q = 1;
851 		den = div_up(num, q);
852 		if (den < rats[k].den_min)
853 			continue;
854 		if (den > rats[k].den_max)
855 			den = rats[k].den_max;
856 		else {
857 			unsigned int r;
858 			r = (den - rats[k].den_min) % rats[k].den_step;
859 			if (r != 0)
860 				den -= r;
861 		}
862 		diff = num - q * den;
863 		if (diff < 0)
864 			diff = -diff;
865 		if (best_num == 0 ||
866 		    diff * best_den < best_diff * den) {
867 			best_diff = diff;
868 			best_den = den;
869 			best_num = num;
870 		}
871 	}
872 	if (best_den == 0) {
873 		i->empty = 1;
874 		return -EINVAL;
875 	}
876 	t.min = div_down(best_num, best_den);
877 	t.openmin = !!(best_num % best_den);
878 
879 	result_num = best_num;
880 	result_diff = best_diff;
881 	result_den = best_den;
882 	best_num = best_den = best_diff = 0;
883 	for (k = 0; k < rats_count; ++k) {
884 		unsigned int num = rats[k].num;
885 		unsigned int den;
886 		unsigned int q = i->max;
887 		int diff;
888 		if (q == 0) {
889 			i->empty = 1;
890 			return -EINVAL;
891 		}
892 		den = div_down(num, q);
893 		if (den > rats[k].den_max)
894 			continue;
895 		if (den < rats[k].den_min)
896 			den = rats[k].den_min;
897 		else {
898 			unsigned int r;
899 			r = (den - rats[k].den_min) % rats[k].den_step;
900 			if (r != 0)
901 				den += rats[k].den_step - r;
902 		}
903 		diff = q * den - num;
904 		if (diff < 0)
905 			diff = -diff;
906 		if (best_num == 0 ||
907 		    diff * best_den < best_diff * den) {
908 			best_diff = diff;
909 			best_den = den;
910 			best_num = num;
911 		}
912 	}
913 	if (best_den == 0) {
914 		i->empty = 1;
915 		return -EINVAL;
916 	}
917 	t.max = div_up(best_num, best_den);
918 	t.openmax = !!(best_num % best_den);
919 	t.integer = 0;
920 	err = snd_interval_refine(i, &t);
921 	if (err < 0)
922 		return err;
923 
924 	if (snd_interval_single(i)) {
925 		if (best_diff * result_den < result_diff * best_den) {
926 			result_num = best_num;
927 			result_den = best_den;
928 		}
929 		if (nump)
930 			*nump = result_num;
931 		if (denp)
932 			*denp = result_den;
933 	}
934 	return err;
935 }
936 EXPORT_SYMBOL(snd_interval_ratnum);
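/*
 * Example of the rate description this operates on (hypothetical clocking,
 * not part of this file): a 24.576 MHz master clock with an integer divider
 * of 256..1024 in steps of 256 yields exactly {96000, 48000, 32000, 24000};
 * a driver would normally hand this to snd_pcm_hw_constraint_ratnums().
 */
static const struct snd_ratnum example_clock = {
	.num = 24576000,
	.den_min = 256,
	.den_max = 1024,
	.den_step = 256,
};

static const struct snd_pcm_hw_constraint_ratnums example_clock_constraint = {
	.nrats = 1,
	.rats = &example_clock,
};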
937 
938 /**
939  * snd_interval_ratden - refine the interval value
940  * @i: interval to refine
941  * @rats_count: number of struct ratden
942  * @rats: struct ratden array
943  * @nump: pointer to store the resultant numerator
944  * @denp: pointer to store the resultant denominator
945  *
946  * Return: Positive if the value is changed, zero if it's not changed, or a
947  * negative error code.
948  */
949 static int snd_interval_ratden(struct snd_interval *i,
950 			       unsigned int rats_count,
951 			       const struct snd_ratden *rats,
952 			       unsigned int *nump, unsigned int *denp)
953 {
954 	unsigned int best_num, best_diff, best_den;
955 	unsigned int k;
956 	struct snd_interval t;
957 	int err;
958 
959 	best_num = best_den = best_diff = 0;
960 	for (k = 0; k < rats_count; ++k) {
961 		unsigned int num;
962 		unsigned int den = rats[k].den;
963 		unsigned int q = i->min;
964 		int diff;
965 		num = mul(q, den);
966 		if (num > rats[k].num_max)
967 			continue;
968 		if (num < rats[k].num_min)
969 			num = rats[k].num_max;
970 		else {
971 			unsigned int r;
972 			r = (num - rats[k].num_min) % rats[k].num_step;
973 			if (r != 0)
974 				num += rats[k].num_step - r;
975 		}
976 		diff = num - q * den;
977 		if (best_num == 0 ||
978 		    diff * best_den < best_diff * den) {
979 			best_diff = diff;
980 			best_den = den;
981 			best_num = num;
982 		}
983 	}
984 	if (best_den == 0) {
985 		i->empty = 1;
986 		return -EINVAL;
987 	}
988 	t.min = div_down(best_num, best_den);
989 	t.openmin = !!(best_num % best_den);
990 
991 	best_num = best_den = best_diff = 0;
992 	for (k = 0; k < rats_count; ++k) {
993 		unsigned int num;
994 		unsigned int den = rats[k].den;
995 		unsigned int q = i->max;
996 		int diff;
997 		num = mul(q, den);
998 		if (num < rats[k].num_min)
999 			continue;
1000 		if (num > rats[k].num_max)
1001 			num = rats[k].num_max;
1002 		else {
1003 			unsigned int r;
1004 			r = (num - rats[k].num_min) % rats[k].num_step;
1005 			if (r != 0)
1006 				num -= r;
1007 		}
1008 		diff = q * den - num;
1009 		if (best_num == 0 ||
1010 		    diff * best_den < best_diff * den) {
1011 			best_diff = diff;
1012 			best_den = den;
1013 			best_num = num;
1014 		}
1015 	}
1016 	if (best_den == 0) {
1017 		i->empty = 1;
1018 		return -EINVAL;
1019 	}
1020 	t.max = div_up(best_num, best_den);
1021 	t.openmax = !!(best_num % best_den);
1022 	t.integer = 0;
1023 	err = snd_interval_refine(i, &t);
1024 	if (err < 0)
1025 		return err;
1026 
1027 	if (snd_interval_single(i)) {
1028 		if (nump)
1029 			*nump = best_num;
1030 		if (denp)
1031 			*denp = best_den;
1032 	}
1033 	return err;
1034 }
1035 
1036 /**
1037  * snd_interval_list - refine the interval value from the list
1038  * @i: the interval value to refine
1039  * @count: the number of elements in the list
1040  * @list: the value list
1041  * @mask: the bit-mask to evaluate
1042  *
1043  * Refines the interval value from the list.
1044  * When mask is non-zero, only the elements corresponding to bit 1 are
1045  * evaluated.
1046  *
1047  * Return: Positive if the value is changed, zero if it's not changed, or a
1048  * negative error code.
1049  */
1050 int snd_interval_list(struct snd_interval *i, unsigned int count,
1051 		      const unsigned int *list, unsigned int mask)
1052 {
1053 	unsigned int k;
1054 	struct snd_interval list_range;
1055 
1056 	if (!count) {
1057 		i->empty = 1;
1058 		return -EINVAL;
1059 	}
1060 	snd_interval_any(&list_range);
1061 	list_range.min = UINT_MAX;
1062 	list_range.max = 0;
1063 	for (k = 0; k < count; k++) {
1064 		if (mask && !(mask & (1 << k)))
1065 			continue;
1066 		if (!snd_interval_test(i, list[k]))
1067 			continue;
1068 		list_range.min = min(list_range.min, list[k]);
1069 		list_range.max = max(list_range.max, list[k]);
1070 	}
1071 	return snd_interval_refine(i, &list_range);
1072 }
1073 EXPORT_SYMBOL(snd_interval_list);
1074 
1075 /**
1076  * snd_interval_ranges - refine the interval value from the list of ranges
1077  * @i: the interval value to refine
1078  * @count: the number of elements in the list of ranges
1079  * @ranges: the ranges list
1080  * @mask: the bit-mask to evaluate
1081  *
1082  * Refines the interval value from the list of ranges.
1083  * When mask is non-zero, only the elements corresponding to bit 1 are
1084  * evaluated.
1085  *
1086  * Return: Positive if the value is changed, zero if it's not changed, or a
1087  * negative error code.
1088  */
1089 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1090 			const struct snd_interval *ranges, unsigned int mask)
1091 {
1092 	unsigned int k;
1093 	struct snd_interval range_union;
1094 	struct snd_interval range;
1095 
1096 	if (!count) {
1097 		snd_interval_none(i);
1098 		return -EINVAL;
1099 	}
1100 	snd_interval_any(&range_union);
1101 	range_union.min = UINT_MAX;
1102 	range_union.max = 0;
1103 	for (k = 0; k < count; k++) {
1104 		if (mask && !(mask & (1 << k)))
1105 			continue;
1106 		snd_interval_copy(&range, &ranges[k]);
1107 		if (snd_interval_refine(&range, i) < 0)
1108 			continue;
1109 		if (snd_interval_empty(&range))
1110 			continue;
1111 
1112 		if (range.min < range_union.min) {
1113 			range_union.min = range.min;
1114 			range_union.openmin = 1;
1115 		}
1116 		if (range.min == range_union.min && !range.openmin)
1117 			range_union.openmin = 0;
1118 		if (range.max > range_union.max) {
1119 			range_union.max = range.max;
1120 			range_union.openmax = 1;
1121 		}
1122 		if (range.max == range_union.max && !range.openmax)
1123 			range_union.openmax = 0;
1124 	}
1125 	return snd_interval_refine(i, &range_union);
1126 }
1127 EXPORT_SYMBOL(snd_interval_ranges);
1128 
1129 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1130 {
1131 	unsigned int n;
1132 	int changed = 0;
1133 	n = i->min % step;
1134 	if (n != 0 || i->openmin) {
1135 		i->min += step - n;
1136 		i->openmin = 0;
1137 		changed = 1;
1138 	}
1139 	n = i->max % step;
1140 	if (n != 0 || i->openmax) {
1141 		i->max -= n;
1142 		i->openmax = 0;
1143 		changed = 1;
1144 	}
1145 	if (snd_interval_checkempty(i)) {
1146 		i->empty = 1;
1147 		return -EINVAL;
1148 	}
1149 	return changed;
1150 }
1151 
1152 /* Info constraints helpers */
1153 
1154 /**
1155  * snd_pcm_hw_rule_add - add the hw-constraint rule
1156  * @runtime: the pcm runtime instance
1157  * @cond: condition bits
1158  * @var: the variable to evaluate
1159  * @func: the evaluation function
1160  * @private: the private data pointer passed to function
1161  * @dep: the dependent variables
1162  *
1163  * Return: Zero if successful, or a negative error code on failure.
1164  */
1165 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1166 			int var,
1167 			snd_pcm_hw_rule_func_t func, void *private,
1168 			int dep, ...)
1169 {
1170 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1171 	struct snd_pcm_hw_rule *c;
1172 	unsigned int k;
1173 	va_list args;
1174 	va_start(args, dep);
1175 	if (constrs->rules_num >= constrs->rules_all) {
1176 		struct snd_pcm_hw_rule *new;
1177 		unsigned int new_rules = constrs->rules_all + 16;
1178 		new = krealloc_array(constrs->rules, new_rules,
1179 				     sizeof(*c), GFP_KERNEL);
1180 		if (!new) {
1181 			va_end(args);
1182 			return -ENOMEM;
1183 		}
1184 		constrs->rules = new;
1185 		constrs->rules_all = new_rules;
1186 	}
1187 	c = &constrs->rules[constrs->rules_num];
1188 	c->cond = cond;
1189 	c->func = func;
1190 	c->var = var;
1191 	c->private = private;
1192 	k = 0;
1193 	while (1) {
1194 		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1195 			va_end(args);
1196 			return -EINVAL;
1197 		}
1198 		c->deps[k++] = dep;
1199 		if (dep < 0)
1200 			break;
1201 		dep = va_arg(args, int);
1202 	}
1203 	constrs->rules_num++;
1204 	va_end(args);
1205 	return 0;
1206 }
1207 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
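/*
 * Sketch of a custom rule (hypothetical hardware limit, not part of this
 * file): cap the period size by a shared FIFO depth whenever the channel
 * count is refined.
 */
static int example_rule_fifo(struct snd_pcm_hw_params *params,
			     struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *ps =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
	const struct snd_interval *ch =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	struct snd_interval t;

	snd_interval_any(&t);
	/* hypothetical FIFO of 4096 samples shared by all channels */
	t.max = 4096 / (ch->min ? ch->min : 1);
	return snd_interval_refine(ps, &t);
}

/* registered from a (hypothetical) .open callback:
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
 *			    example_rule_fifo, NULL,
 *			    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 */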
1208 
1209 /**
1210  * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1211  * @runtime: PCM runtime instance
1212  * @var: hw_params variable to apply the mask
1213  * @mask: the bitmap mask
1214  *
1215  * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1216  *
1217  * Return: Zero if successful, or a negative error code on failure.
1218  */
1219 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1220 			       u_int32_t mask)
1221 {
1222 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223 	struct snd_mask *maskp = constrs_mask(constrs, var);
1224 	*maskp->bits &= mask;
1225 	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1226 	if (*maskp->bits == 0)
1227 		return -EINVAL;
1228 	return 0;
1229 }
1230 
1231 /**
1232  * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1233  * @runtime: PCM runtime instance
1234  * @var: hw_params variable to apply the mask
1235  * @mask: the 64bit bitmap mask
1236  *
1237  * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1238  *
1239  * Return: Zero if successful, or a negative error code on failure.
1240  */
1241 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1242 				 u_int64_t mask)
1243 {
1244 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1245 	struct snd_mask *maskp = constrs_mask(constrs, var);
1246 	maskp->bits[0] &= (u_int32_t)mask;
1247 	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1248 	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1249 	if (! maskp->bits[0] && ! maskp->bits[1])
1250 		return -EINVAL;
1251 	return 0;
1252 }
1253 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1254 
1255 /**
1256  * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1257  * @runtime: PCM runtime instance
1258  * @var: hw_params variable to apply the integer constraint
1259  *
1260  * Apply the constraint of integer to an interval parameter.
1261  *
1262  * Return: Positive if the value is changed, zero if it's not changed, or a
1263  * negative error code.
1264  */
1265 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1266 {
1267 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268 	return snd_interval_setinteger(constrs_interval(constrs, var));
1269 }
1270 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1271 
1272 /**
1273  * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1274  * @runtime: PCM runtime instance
1275  * @var: hw_params variable to apply the range
1276  * @min: the minimal value
1277  * @max: the maximal value
1278  *
1279  * Apply the min/max range constraint to an interval parameter.
1280  *
1281  * Return: Positive if the value is changed, zero if it's not changed, or a
1282  * negative error code.
1283  */
1284 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1285 				 unsigned int min, unsigned int max)
1286 {
1287 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1288 	struct snd_interval t;
1289 	t.min = min;
1290 	t.max = max;
1291 	t.openmin = t.openmax = 0;
1292 	t.integer = 0;
1293 	return snd_interval_refine(constrs_interval(constrs, var), &t);
1294 }
1295 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
1296 
1297 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1298 				struct snd_pcm_hw_rule *rule)
1299 {
1300 	struct snd_pcm_hw_constraint_list *list = rule->private;
1301 	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1302 }
1303 
1304 
1305 /**
1306  * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1307  * @runtime: PCM runtime instance
1308  * @cond: condition bits
1309  * @var: hw_params variable to apply the list constraint
1310  * @l: list
1311  *
1312  * Apply the list of constraints to an interval parameter.
1313  *
1314  * Return: Zero if successful, or a negative error code on failure.
1315  */
1316 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1317 			       unsigned int cond,
1318 			       snd_pcm_hw_param_t var,
1319 			       const struct snd_pcm_hw_constraint_list *l)
1320 {
1321 	return snd_pcm_hw_rule_add(runtime, cond, var,
1322 				   snd_pcm_hw_rule_list, (void *)l,
1323 				   var, -1);
1324 }
1325 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
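/*
 * Sketch of how the constraint helpers are typically combined in a driver's
 * .open callback (hypothetical hardware limits, not part of this file):
 */
static const unsigned int example_rates[] = { 44100, 48000, 96000 };

static const struct snd_pcm_hw_constraint_list example_rate_list = {
	.count = ARRAY_SIZE(example_rates),
	.list = example_rates,
	.mask = 0,
};

static int example_open_constraints(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
					 &example_rate_list);
	if (err < 0)
		return err;
	/* whole number of periods per buffer */
	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0)
		return err;
	/* hypothetical DMA limit on the total buffer */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   4096, 64 * 1024);
	return err < 0 ? err : 0;
}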
1326 
1327 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1328 				  struct snd_pcm_hw_rule *rule)
1329 {
1330 	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1331 	return snd_interval_ranges(hw_param_interval(params, rule->var),
1332 				   r->count, r->ranges, r->mask);
1333 }
1334 
1335 
1336 /**
1337  * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1338  * @runtime: PCM runtime instance
1339  * @cond: condition bits
1340  * @var: hw_params variable to apply the list of range constraints
1341  * @r: ranges
1342  *
1343  * Apply the list of range constraints to an interval parameter.
1344  *
1345  * Return: Zero if successful, or a negative error code on failure.
1346  */
1347 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1348 				 unsigned int cond,
1349 				 snd_pcm_hw_param_t var,
1350 				 const struct snd_pcm_hw_constraint_ranges *r)
1351 {
1352 	return snd_pcm_hw_rule_add(runtime, cond, var,
1353 				   snd_pcm_hw_rule_ranges, (void *)r,
1354 				   var, -1);
1355 }
1356 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1357 
1358 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1359 				   struct snd_pcm_hw_rule *rule)
1360 {
1361 	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1362 	unsigned int num = 0, den = 0;
1363 	int err;
1364 	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1365 				  r->nrats, r->rats, &num, &den);
1366 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1367 		params->rate_num = num;
1368 		params->rate_den = den;
1369 	}
1370 	return err;
1371 }
1372 
1373 /**
1374  * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1375  * @runtime: PCM runtime instance
1376  * @cond: condition bits
1377  * @var: hw_params variable to apply the ratnums constraint
1378  * @r: struct snd_pcm_hw_constraint_ratnums constraints
1379  *
1380  * Return: Zero if successful, or a negative error code on failure.
1381  */
1382 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1383 				  unsigned int cond,
1384 				  snd_pcm_hw_param_t var,
1385 				  const struct snd_pcm_hw_constraint_ratnums *r)
1386 {
1387 	return snd_pcm_hw_rule_add(runtime, cond, var,
1388 				   snd_pcm_hw_rule_ratnums, (void *)r,
1389 				   var, -1);
1390 }
1391 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1392 
1393 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1394 				   struct snd_pcm_hw_rule *rule)
1395 {
1396 	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1397 	unsigned int num = 0, den = 0;
1398 	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1399 				  r->nrats, r->rats, &num, &den);
1400 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1401 		params->rate_num = num;
1402 		params->rate_den = den;
1403 	}
1404 	return err;
1405 }
1406 
1407 /**
1408  * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1409  * @runtime: PCM runtime instance
1410  * @cond: condition bits
1411  * @var: hw_params variable to apply the ratdens constraint
1412  * @r: struct snd_pcm_hw_constraint_ratdens constraints
1413  *
1414  * Return: Zero if successful, or a negative error code on failure.
1415  */
1416 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1417 				  unsigned int cond,
1418 				  snd_pcm_hw_param_t var,
1419 				  const struct snd_pcm_hw_constraint_ratdens *r)
1420 {
1421 	return snd_pcm_hw_rule_add(runtime, cond, var,
1422 				   snd_pcm_hw_rule_ratdens, (void *)r,
1423 				   var, -1);
1424 }
1425 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1426 
1427 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1428 				  struct snd_pcm_hw_rule *rule)
1429 {
1430 	unsigned int l = (unsigned long) rule->private;
1431 	int width = l & 0xffff;
1432 	unsigned int msbits = l >> 16;
1433 	const struct snd_interval *i =
1434 		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1435 
1436 	if (!snd_interval_single(i))
1437 		return 0;
1438 
1439 	if ((snd_interval_value(i) == width) ||
1440 	    (width == 0 && snd_interval_value(i) > msbits))
1441 		params->msbits = min_not_zero(params->msbits, msbits);
1442 
1443 	return 0;
1444 }
1445 
1446 /**
1447  * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1448  * @runtime: PCM runtime instance
1449  * @cond: condition bits
1450  * @width: sample bits width
1451  * @msbits: msbits width
1452  *
1453  * This constraint will set the number of most significant bits (msbits) if a
1454  * sample format with the specified width has been selected. If width is set to 0
1455  * the msbits will be set for any sample format with a width larger than the
1456  * specified msbits.
1457  *
1458  * Return: Zero if successful, or a negative error code on failure.
1459  */
1460 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1461 				 unsigned int cond,
1462 				 unsigned int width,
1463 				 unsigned int msbits)
1464 {
1465 	unsigned long l = (msbits << 16) | width;
1466 	return snd_pcm_hw_rule_add(runtime, cond, -1,
1467 				    snd_pcm_hw_rule_msbits,
1468 				    (void*) l,
1469 				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1470 }
1471 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
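/*
 * Example (sketch): hardware that transfers 32-bit samples but only drives
 * 24 valid bits would call, from its open callback,
 *
 *	snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */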
1472 
1473 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1474 				struct snd_pcm_hw_rule *rule)
1475 {
1476 	unsigned long step = (unsigned long) rule->private;
1477 	return snd_interval_step(hw_param_interval(params, rule->var), step);
1478 }
1479 
1480 /**
1481  * snd_pcm_hw_constraint_step - add a hw constraint step rule
1482  * @runtime: PCM runtime instance
1483  * @cond: condition bits
1484  * @var: hw_params variable to apply the step constraint
1485  * @step: step size
1486  *
1487  * Return: Zero if successful, or a negative error code on failure.
1488  */
1489 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1490 			       unsigned int cond,
1491 			       snd_pcm_hw_param_t var,
1492 			       unsigned long step)
1493 {
1494 	return snd_pcm_hw_rule_add(runtime, cond, var,
1495 				   snd_pcm_hw_rule_step, (void *) step,
1496 				   var, -1);
1497 }
1498 EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1499 
1500 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1501 {
1502 	static const unsigned int pow2_sizes[] = {
1503 		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1504 		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1505 		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1506 		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1507 	};
1508 	return snd_interval_list(hw_param_interval(params, rule->var),
1509 				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1510 }
1511 
1512 /**
1513  * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1514  * @runtime: PCM runtime instance
1515  * @cond: condition bits
1516  * @var: hw_params variable to apply the power-of-2 constraint
1517  *
1518  * Return: Zero if successful, or a negative error code on failure.
1519  */
1520 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1521 			       unsigned int cond,
1522 			       snd_pcm_hw_param_t var)
1523 {
1524 	return snd_pcm_hw_rule_add(runtime, cond, var,
1525 				   snd_pcm_hw_rule_pow2, NULL,
1526 				   var, -1);
1527 }
1528 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1529 
1530 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1531 					   struct snd_pcm_hw_rule *rule)
1532 {
1533 	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1534 	struct snd_interval *rate;
1535 
1536 	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1537 	return snd_interval_list(rate, 1, &base_rate, 0);
1538 }
1539 
1540 /**
1541  * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1542  * @runtime: PCM runtime instance
1543  * @base_rate: the rate at which the hardware does not resample
1544  *
1545  * Return: Zero if successful, or a negative error code on failure.
1546  */
1547 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1548 			       unsigned int base_rate)
1549 {
1550 	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1551 				   SNDRV_PCM_HW_PARAM_RATE,
1552 				   snd_pcm_hw_rule_noresample_func,
1553 				   (void *)(uintptr_t)base_rate,
1554 				   SNDRV_PCM_HW_PARAM_RATE, -1);
1555 }
1556 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
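/*
 * Example (sketch): hardware clocked at a fixed 48 kHz would call, from its
 * open callback,
 *
 *	snd_pcm_hw_rule_noresample(runtime, 48000);
 *
 * so that the rate is pinned to 48000 when the application requests
 * no-resample mode.
 */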
1557 
1558 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1559 				  snd_pcm_hw_param_t var)
1560 {
1561 	if (hw_is_mask(var)) {
1562 		snd_mask_any(hw_param_mask(params, var));
1563 		params->cmask |= 1 << var;
1564 		params->rmask |= 1 << var;
1565 		return;
1566 	}
1567 	if (hw_is_interval(var)) {
1568 		snd_interval_any(hw_param_interval(params, var));
1569 		params->cmask |= 1 << var;
1570 		params->rmask |= 1 << var;
1571 		return;
1572 	}
1573 	snd_BUG();
1574 }
1575 
1576 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1577 {
1578 	unsigned int k;
1579 	memset(params, 0, sizeof(*params));
1580 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1581 		_snd_pcm_hw_param_any(params, k);
1582 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1583 		_snd_pcm_hw_param_any(params, k);
1584 	params->info = ~0U;
1585 }
1586 EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1587 
1588 /**
1589  * snd_pcm_hw_param_value - return @params field @var value
1590  * @params: the hw_params instance
1591  * @var: parameter to retrieve
1592  * @dir: pointer to the direction (-1,0,1) or %NULL
1593  *
1594  * Return: The value for field @var if it's fixed in configuration space
1595  * defined by @params. -%EINVAL otherwise.
1596  */
1597 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1598 			   snd_pcm_hw_param_t var, int *dir)
1599 {
1600 	if (hw_is_mask(var)) {
1601 		const struct snd_mask *mask = hw_param_mask_c(params, var);
1602 		if (!snd_mask_single(mask))
1603 			return -EINVAL;
1604 		if (dir)
1605 			*dir = 0;
1606 		return snd_mask_value(mask);
1607 	}
1608 	if (hw_is_interval(var)) {
1609 		const struct snd_interval *i = hw_param_interval_c(params, var);
1610 		if (!snd_interval_single(i))
1611 			return -EINVAL;
1612 		if (dir)
1613 			*dir = i->openmin;
1614 		return snd_interval_value(i);
1615 	}
1616 	return -EINVAL;
1617 }
1618 EXPORT_SYMBOL(snd_pcm_hw_param_value);
1619 
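/*
 * Usage sketch (illustrative, not part of this file): once a parameter has
 * been narrowed down to a single value (e.g. after snd_pcm_hw_refine() or
 * inside a .hw_params callback), it can be read back directly:
 *
 *	int rate = snd_pcm_hw_param_value(params, SNDRV_PCM_HW_PARAM_RATE, NULL);
 *
 *	if (rate < 0)
 *		return rate;	(the rate is not yet a single value)
 *
 * params_rate() and friends are the usual shortcuts for the common
 * parameters.
 */
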
1620 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1621 				snd_pcm_hw_param_t var)
1622 {
1623 	if (hw_is_mask(var)) {
1624 		snd_mask_none(hw_param_mask(params, var));
1625 		params->cmask |= 1 << var;
1626 		params->rmask |= 1 << var;
1627 	} else if (hw_is_interval(var)) {
1628 		snd_interval_none(hw_param_interval(params, var));
1629 		params->cmask |= 1 << var;
1630 		params->rmask |= 1 << var;
1631 	} else {
1632 		snd_BUG();
1633 	}
1634 }
1635 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1636 
1637 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1638 				   snd_pcm_hw_param_t var)
1639 {
1640 	int changed;
1641 	if (hw_is_mask(var))
1642 		changed = snd_mask_refine_first(hw_param_mask(params, var));
1643 	else if (hw_is_interval(var))
1644 		changed = snd_interval_refine_first(hw_param_interval(params, var));
1645 	else
1646 		return -EINVAL;
1647 	if (changed > 0) {
1648 		params->cmask |= 1 << var;
1649 		params->rmask |= 1 << var;
1650 	}
1651 	return changed;
1652 }
1653 
1654 
1655 /**
1656  * snd_pcm_hw_param_first - refine config space and return minimum value
1657  * @pcm: PCM instance
1658  * @params: the hw_params instance
1659  * @var: parameter to retrieve
1660  * @dir: pointer to the direction (-1,0,1) or %NULL
1661  *
1662  * Inside configuration space defined by @params remove from @var all
1663  * values > minimum. Reduce configuration space accordingly.
1664  *
1665  * Return: The minimum, or a negative error code on failure.
1666  */
1667 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1668 			   struct snd_pcm_hw_params *params,
1669 			   snd_pcm_hw_param_t var, int *dir)
1670 {
1671 	int changed = _snd_pcm_hw_param_first(params, var);
1672 	if (changed < 0)
1673 		return changed;
1674 	if (params->rmask) {
1675 		int err = snd_pcm_hw_refine(pcm, params);
1676 		if (err < 0)
1677 			return err;
1678 	}
1679 	return snd_pcm_hw_param_value(params, var, dir);
1680 }
1681 EXPORT_SYMBOL(snd_pcm_hw_param_first);
1682 
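/*
 * Usage sketch (illustrative, not part of this file): pick the smallest
 * value the configuration space still allows for a parameter, e.g. the
 * lowest possible rate:
 *
 *	int rate = snd_pcm_hw_param_first(substream, params,
 *					  SNDRV_PCM_HW_PARAM_RATE, NULL);
 *
 * snd_pcm_hw_param_last() below is the mirror image and selects the
 * largest value.  Both refine the whole space, so other parameters may get
 * narrowed down as a side effect.
 */
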
1683 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1684 				  snd_pcm_hw_param_t var)
1685 {
1686 	int changed;
1687 	if (hw_is_mask(var))
1688 		changed = snd_mask_refine_last(hw_param_mask(params, var));
1689 	else if (hw_is_interval(var))
1690 		changed = snd_interval_refine_last(hw_param_interval(params, var));
1691 	else
1692 		return -EINVAL;
1693 	if (changed > 0) {
1694 		params->cmask |= 1 << var;
1695 		params->rmask |= 1 << var;
1696 	}
1697 	return changed;
1698 }
1699 
1700 
1701 /**
1702  * snd_pcm_hw_param_last - refine config space and return maximum value
1703  * @pcm: PCM instance
1704  * @params: the hw_params instance
1705  * @var: parameter to retrieve
1706  * @dir: pointer to the direction (-1,0,1) or %NULL
1707  *
1708  * Inside configuration space defined by @params remove from @var all
1709  * values < maximum. Reduce configuration space accordingly.
1710  *
1711  * Return: The maximum, or a negative error code on failure.
1712  */
1713 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1714 			  struct snd_pcm_hw_params *params,
1715 			  snd_pcm_hw_param_t var, int *dir)
1716 {
1717 	int changed = _snd_pcm_hw_param_last(params, var);
1718 	if (changed < 0)
1719 		return changed;
1720 	if (params->rmask) {
1721 		int err = snd_pcm_hw_refine(pcm, params);
1722 		if (err < 0)
1723 			return err;
1724 	}
1725 	return snd_pcm_hw_param_value(params, var, dir);
1726 }
1727 EXPORT_SYMBOL(snd_pcm_hw_param_last);
1728 
1729 /**
1730  * snd_pcm_hw_params_bits - Get the number of bits per sample.
1731  * @p: hardware parameters
1732  *
1733  * Return: The number of bits per sample, based on the format,
1734  * subformat and msbits of the specified hw params.
1735  */
1736 int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1737 {
1738 	snd_pcm_subformat_t subformat = params_subformat(p);
1739 	snd_pcm_format_t format = params_format(p);
1740 
1741 	switch (format) {
1742 	case SNDRV_PCM_FORMAT_S32_LE:
1743 	case SNDRV_PCM_FORMAT_U32_LE:
1744 	case SNDRV_PCM_FORMAT_S32_BE:
1745 	case SNDRV_PCM_FORMAT_U32_BE:
1746 		switch (subformat) {
1747 		case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1748 			return 20;
1749 		case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1750 			return 24;
1751 		case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1752 		case SNDRV_PCM_SUBFORMAT_STD:
1753 		default:
1754 			break;
1755 		}
1756 		fallthrough;
1757 	default:
1758 		return snd_pcm_format_width(format);
1759 	}
1760 }
1761 EXPORT_SYMBOL(snd_pcm_hw_params_bits);
1762 
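/*
 * Usage sketch (illustrative, not part of this file): a driver's
 * .hw_params callback can use this to program the effective number of
 * valid bits per sample, which differs from the physical width for the
 * MSBITS_* subformats:
 *
 *	int bits = snd_pcm_hw_params_bits(params);
 *
 *	if (bits < 0)
 *		return bits;
 *	foo_chip_set_msbits(chip, bits);	(hypothetical driver helper)
 */
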
1763 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1764 				   void *arg)
1765 {
1766 	struct snd_pcm_runtime *runtime = substream->runtime;
1767 
1768 	guard(pcm_stream_lock_irqsave)(substream);
1769 	if (snd_pcm_running(substream) &&
1770 	    snd_pcm_update_hw_ptr(substream) >= 0)
1771 		runtime->status->hw_ptr %= runtime->buffer_size;
1772 	else {
1773 		runtime->status->hw_ptr = 0;
1774 		runtime->hw_ptr_wrap = 0;
1775 	}
1776 	return 0;
1777 }
1778 
1779 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1780 					  void *arg)
1781 {
1782 	struct snd_pcm_channel_info *info = arg;
1783 	struct snd_pcm_runtime *runtime = substream->runtime;
1784 	int width;
1785 	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1786 		info->offset = -1;
1787 		return 0;
1788 	}
1789 	width = snd_pcm_format_physical_width(runtime->format);
1790 	if (width < 0)
1791 		return width;
1792 	info->offset = 0;
1793 	switch (runtime->access) {
1794 	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1795 	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1796 		info->first = info->channel * width;
1797 		info->step = runtime->channels * width;
1798 		break;
1799 	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1800 	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1801 	{
1802 		size_t size = runtime->dma_bytes / runtime->channels;
1803 		info->first = info->channel * size * 8;
1804 		info->step = width;
1805 		break;
1806 	}
1807 	default:
1808 		snd_BUG();
1809 		break;
1810 	}
1811 	return 0;
1812 }
1813 
1814 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1815 				       void *arg)
1816 {
1817 	struct snd_pcm_hw_params *params = arg;
1818 	snd_pcm_format_t format;
1819 	int channels;
1820 	ssize_t frame_size;
1821 
1822 	params->fifo_size = substream->runtime->hw.fifo_size;
1823 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1824 		format = params_format(params);
1825 		channels = params_channels(params);
1826 		frame_size = snd_pcm_format_size(format, channels);
1827 		if (frame_size > 0)
1828 			params->fifo_size /= frame_size;
1829 	}
1830 	return 0;
1831 }
1832 
1833 static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream,
1834 				     void *arg)
1835 {
1836 	static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff,
1837 					      0xff, 0xff, 0xff, 0xff,
1838 					      0xff, 0xff, 0xff, 0xff };
1839 
1840 	if (substream->runtime->std_sync_id)
1841 		snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id));
1842 	return 0;
1843 }
1844 
1845 /**
1846  * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1847  * @substream: the pcm substream instance
1848  * @cmd: ioctl command
1849  * @arg: ioctl argument
1850  *
1851  * Processes the generic ioctl commands for PCM.
1852  * Can be passed as the ioctl callback for PCM ops.
1853  *
1854  * Return: Zero if successful, or a negative error code on failure.
1855  */
1856 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1857 		      unsigned int cmd, void *arg)
1858 {
1859 	switch (cmd) {
1860 	case SNDRV_PCM_IOCTL1_RESET:
1861 		return snd_pcm_lib_ioctl_reset(substream, arg);
1862 	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1863 		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1864 	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1865 		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1866 	case SNDRV_PCM_IOCTL1_SYNC_ID:
1867 		return snd_pcm_lib_ioctl_sync_id(substream, arg);
1868 	}
1869 	return -ENXIO;
1870 }
1871 EXPORT_SYMBOL(snd_pcm_lib_ioctl);
1872 
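/*
 * Usage sketch (illustrative, not part of this file): drivers that don't
 * need to intercept any of the IOCTL1 commands can simply point the
 * .ioctl field of their snd_pcm_ops at this helper (the PCM core also
 * falls back to it when .ioctl is left NULL):
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open	= foo_pcm_open,
 *		.ioctl	= snd_pcm_lib_ioctl,
 *		...
 *	};
 */
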
1873 /**
1874  * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1875  *						under acquired lock of PCM substream.
1876  * @substream: the instance of pcm substream.
1877  *
1878  * This function is called when a batch of audio data frames, the same size as the period of the
1879  * buffer, has been processed in the audio data transfer.
1880  *
1881  * The call updates the runtime status with the latest position of the audio data transfer, checks
1882  * for overruns and underruns of the buffer, wakes up user processes waiting for available audio
1883  * data frames, samples the audio timestamp, and stops or drains the PCM
1884  * substream according to the configured thresholds.
1885  *
1886  * The function is intended for the case that the PCM driver handles audio data frames under the
1887  * acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops in process
1888  * context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()`` instead,
1889  * since this function requires the lock of the PCM substream to be acquired in advance.
1890  *
1891  * Developers should note that some callbacks in &snd_pcm_ops may be invoked by the call of
1892  * this function:
1893  *
1894  * - .pointer - to retrieve the current position of the audio data transfer by frame count or XRUN state.
1895  * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1896  * - .get_time_info - to retrieve the audio time stamp if needed.
1897  *
1898  * Even if more than one period has elapsed since the last call, this needs to be called only once.
1899  */
1900 void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1901 {
1902 	struct snd_pcm_runtime *runtime;
1903 
1904 	if (PCM_RUNTIME_CHECK(substream))
1905 		return;
1906 	runtime = substream->runtime;
1907 
1908 	if (!snd_pcm_running(substream) ||
1909 	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1910 		goto _end;
1911 
1912 #ifdef CONFIG_SND_PCM_TIMER
1913 	if (substream->timer_running)
1914 		snd_timer_interrupt(substream->timer, 1);
1915 #endif
1916  _end:
1917 	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1918 }
1919 EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1920 
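/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * advances its position from a context already holding the stream lock
 * (e.g. within its .trigger or .ack callback) reports the elapsed period
 * like this:
 *
 *	...process one period worth of frames...
 *	snd_pcm_period_elapsed_under_stream_lock(substream);
 *
 * Calling snd_pcm_period_elapsed() in such a context would deadlock,
 * because it tries to take the same stream lock again.
 */
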
1921 /**
1922  * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1923  *			      PCM substream.
1924  * @substream: the instance of PCM substream.
1925  *
1926  * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
1927  * it acquires the lock of the PCM substream by itself.
1928  *
1929  * It's typically called from an IRQ handler when a hardware IRQ occurs, to notify that a batch of
1930  * audio data frames, the same size as the period of the buffer, has been processed in the audio
1931  * data transfer.
1932  */
1933 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1934 {
1935 	if (snd_BUG_ON(!substream))
1936 		return;
1937 
1938 	guard(pcm_stream_lock_irqsave)(substream);
1939 	snd_pcm_period_elapsed_under_stream_lock(substream);
1940 }
1941 EXPORT_SYMBOL(snd_pcm_period_elapsed);
1942 
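/*
 * Usage sketch (illustrative, not part of this file): the typical pattern
 * in a hardware interrupt handler:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		if (foo_period_done(chip))	(hypothetical driver helpers)
 *			snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */
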
1943 /*
1944  * Wait until avail_min data becomes available
1945  * Returns a negative error code if any error occurs during operation.
1946  * The available space is stored in availp.  When err = 0 and avail = 0
1947  * on the capture stream, it indicates the stream is in DRAINING state.
1948  */
1949 static int wait_for_avail(struct snd_pcm_substream *substream,
1950 			      snd_pcm_uframes_t *availp)
1951 {
1952 	struct snd_pcm_runtime *runtime = substream->runtime;
1953 	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1954 	wait_queue_entry_t wait;
1955 	int err = 0;
1956 	snd_pcm_uframes_t avail = 0;
1957 	long wait_time, tout;
1958 
1959 	init_waitqueue_entry(&wait, current);
1960 	set_current_state(TASK_INTERRUPTIBLE);
1961 	add_wait_queue(&runtime->tsleep, &wait);
1962 
1963 	if (runtime->no_period_wakeup)
1964 		wait_time = MAX_SCHEDULE_TIMEOUT;
1965 	else {
1966 		/* use wait time from substream if available */
1967 		if (substream->wait_time) {
1968 			wait_time = substream->wait_time;
1969 		} else {
1970 			wait_time = 100;
1971 
1972 			if (runtime->rate) {
1973 				long t = runtime->buffer_size * 1100 / runtime->rate;
1974 				wait_time = max(t, wait_time);
1975 			}
1976 		}
1977 		wait_time = msecs_to_jiffies(wait_time);
1978 	}
1979 
1980 	for (;;) {
1981 		if (signal_pending(current)) {
1982 			err = -ERESTARTSYS;
1983 			break;
1984 		}
1985 
1986 		/*
1987 		 * We need to check if space became available already
1988 		 * (and thus the wakeup happened already) first to close
1989 		 * the race of space already having become available.
1990 		 * This check must happen after being added to the waitqueue
1991 		 * and having current state be INTERRUPTIBLE.
1992 		 */
1993 		avail = snd_pcm_avail(substream);
1994 		if (avail >= runtime->twake)
1995 			break;
1996 		snd_pcm_stream_unlock_irq(substream);
1997 
1998 		tout = schedule_timeout(wait_time);
1999 
2000 		snd_pcm_stream_lock_irq(substream);
2001 		set_current_state(TASK_INTERRUPTIBLE);
2002 		switch (runtime->state) {
2003 		case SNDRV_PCM_STATE_SUSPENDED:
2004 			err = -ESTRPIPE;
2005 			goto _endloop;
2006 		case SNDRV_PCM_STATE_XRUN:
2007 			err = -EPIPE;
2008 			goto _endloop;
2009 		case SNDRV_PCM_STATE_DRAINING:
2010 			if (is_playback)
2011 				err = -EPIPE;
2012 			else
2013 				avail = 0; /* indicate draining */
2014 			goto _endloop;
2015 		case SNDRV_PCM_STATE_OPEN:
2016 		case SNDRV_PCM_STATE_SETUP:
2017 		case SNDRV_PCM_STATE_DISCONNECTED:
2018 			err = -EBADFD;
2019 			goto _endloop;
2020 		case SNDRV_PCM_STATE_PAUSED:
2021 			continue;
2022 		}
2023 		if (!tout) {
2024 			pcm_dbg(substream->pcm,
2025 				"%s timeout (DMA or IRQ trouble?)\n",
2026 				is_playback ? "playback write" : "capture read");
2027 			err = -EIO;
2028 			break;
2029 		}
2030 	}
2031  _endloop:
2032 	set_current_state(TASK_RUNNING);
2033 	remove_wait_queue(&runtime->tsleep, &wait);
2034 	*availp = avail;
2035 	return err;
2036 }
2037 
2038 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2039 			      int channel, unsigned long hwoff,
2040 			      struct iov_iter *iter, unsigned long bytes);
2041 
2042 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2043 			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2044 			  bool);
2045 
2046 /* calculate the target DMA-buffer position to be written/read */
2047 static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2048 			   int channel, unsigned long hwoff)
2049 {
2050 	return runtime->dma_area + hwoff +
2051 		channel * (runtime->dma_bytes / runtime->channels);
2052 }
2053 
2054 /* default copy ops for write; used for both interleaved and non-interleaved modes */
2055 static int default_write_copy(struct snd_pcm_substream *substream,
2056 			      int channel, unsigned long hwoff,
2057 			      struct iov_iter *iter, unsigned long bytes)
2058 {
2059 	if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2060 			   bytes, iter) != bytes)
2061 		return -EFAULT;
2062 	return 0;
2063 }
2064 
2065 /* fill silence instead of copying data; called as a transfer helper
2066  * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
2067  * a NULL buffer is passed
2068  */
2069 static int fill_silence(struct snd_pcm_substream *substream, int channel,
2070 			unsigned long hwoff, struct iov_iter *iter,
2071 			unsigned long bytes)
2072 {
2073 	struct snd_pcm_runtime *runtime = substream->runtime;
2074 
2075 	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2076 		return 0;
2077 	if (substream->ops->fill_silence)
2078 		return substream->ops->fill_silence(substream, channel,
2079 						    hwoff, bytes);
2080 
2081 	snd_pcm_format_set_silence(runtime->format,
2082 				   get_dma_ptr(runtime, channel, hwoff),
2083 				   bytes_to_samples(runtime, bytes));
2084 	return 0;
2085 }
2086 
2087 /* default copy ops for read; used for both interleaved and non-interleaved modes */
2088 static int default_read_copy(struct snd_pcm_substream *substream,
2089 			     int channel, unsigned long hwoff,
2090 			     struct iov_iter *iter, unsigned long bytes)
2091 {
2092 	if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2093 			 bytes, iter) != bytes)
2094 		return -EFAULT;
2095 	return 0;
2096 }
2097 
2098 /* call transfer with the filled iov_iter */
2099 static int do_transfer(struct snd_pcm_substream *substream, int c,
2100 		       unsigned long hwoff, void *data, unsigned long bytes,
2101 		       pcm_transfer_f transfer, bool in_kernel)
2102 {
2103 	struct iov_iter iter;
2104 	int err, type;
2105 
2106 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2107 		type = ITER_SOURCE;
2108 	else
2109 		type = ITER_DEST;
2110 
2111 	if (in_kernel) {
2112 		struct kvec kvec = { data, bytes };
2113 
2114 		iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2115 		return transfer(substream, c, hwoff, &iter, bytes);
2116 	}
2117 
2118 	err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2119 	if (err)
2120 		return err;
2121 	return transfer(substream, c, hwoff, &iter, bytes);
2122 }
2123 
2124 /* call transfer function with the converted pointers and sizes;
2125  * for interleaved mode, it's one shot for all samples
2126  */
2127 static int interleaved_copy(struct snd_pcm_substream *substream,
2128 			    snd_pcm_uframes_t hwoff, void *data,
2129 			    snd_pcm_uframes_t off,
2130 			    snd_pcm_uframes_t frames,
2131 			    pcm_transfer_f transfer,
2132 			    bool in_kernel)
2133 {
2134 	struct snd_pcm_runtime *runtime = substream->runtime;
2135 
2136 	/* convert to bytes */
2137 	hwoff = frames_to_bytes(runtime, hwoff);
2138 	off = frames_to_bytes(runtime, off);
2139 	frames = frames_to_bytes(runtime, frames);
2140 
2141 	return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2142 			   in_kernel);
2143 }
2144 
2145 /* call transfer function with the converted pointers and sizes for each
2146  * non-interleaved channel; when buffer is NULL, silencing instead of copying
2147  */
2148 static int noninterleaved_copy(struct snd_pcm_substream *substream,
2149 			       snd_pcm_uframes_t hwoff, void *data,
2150 			       snd_pcm_uframes_t off,
2151 			       snd_pcm_uframes_t frames,
2152 			       pcm_transfer_f transfer,
2153 			       bool in_kernel)
2154 {
2155 	struct snd_pcm_runtime *runtime = substream->runtime;
2156 	int channels = runtime->channels;
2157 	void **bufs = data;
2158 	int c, err;
2159 
2160 	/* convert to bytes; note that it's not frames_to_bytes() here.
2161 	 * in non-interleaved mode, we copy for each channel, thus
2162 	 * each copy is n_samples bytes x channels = whole frames.
2163 	 */
2164 	off = samples_to_bytes(runtime, off);
2165 	frames = samples_to_bytes(runtime, frames);
2166 	hwoff = samples_to_bytes(runtime, hwoff);
2167 	for (c = 0; c < channels; ++c, ++bufs) {
2168 		if (!data || !*bufs)
2169 			err = fill_silence(substream, c, hwoff, NULL, frames);
2170 		else
2171 			err = do_transfer(substream, c, hwoff, *bufs + off,
2172 					  frames, transfer, in_kernel);
2173 		if (err < 0)
2174 			return err;
2175 	}
2176 	return 0;
2177 }
2178 
2179 /* fill silence on the given buffer position;
2180  * called from snd_pcm_playback_silence()
2181  */
2182 static int fill_silence_frames(struct snd_pcm_substream *substream,
2183 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2184 {
2185 	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2186 	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2187 		return interleaved_copy(substream, off, NULL, 0, frames,
2188 					fill_silence, true);
2189 	else
2190 		return noninterleaved_copy(substream, off, NULL, 0, frames,
2191 					   fill_silence, true);
2192 }
2193 
2194 /* sanity-check for read/write methods */
2195 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2196 {
2197 	struct snd_pcm_runtime *runtime;
2198 	if (PCM_RUNTIME_CHECK(substream))
2199 		return -ENXIO;
2200 	runtime = substream->runtime;
2201 	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2202 		return -EINVAL;
2203 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2204 		return -EBADFD;
2205 	return 0;
2206 }
2207 
2208 static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2209 {
2210 	switch (runtime->state) {
2211 	case SNDRV_PCM_STATE_PREPARED:
2212 	case SNDRV_PCM_STATE_RUNNING:
2213 	case SNDRV_PCM_STATE_PAUSED:
2214 		return 0;
2215 	case SNDRV_PCM_STATE_XRUN:
2216 		return -EPIPE;
2217 	case SNDRV_PCM_STATE_SUSPENDED:
2218 		return -ESTRPIPE;
2219 	default:
2220 		return -EBADFD;
2221 	}
2222 }
2223 
2224 /* update to the given appl_ptr and call the ack callback if needed;
2225  * when an error is returned, roll back to the original value
2226  */
2227 int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2228 			   snd_pcm_uframes_t appl_ptr)
2229 {
2230 	struct snd_pcm_runtime *runtime = substream->runtime;
2231 	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2232 	snd_pcm_sframes_t diff;
2233 	int ret;
2234 
2235 	if (old_appl_ptr == appl_ptr)
2236 		return 0;
2237 
2238 	if (appl_ptr >= runtime->boundary)
2239 		return -EINVAL;
2240 	/*
2241 	 * check if a rewind is requested by the application
2242 	 */
2243 	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2244 		diff = appl_ptr - old_appl_ptr;
2245 		if (diff >= 0) {
2246 			if (diff > runtime->buffer_size)
2247 				return -EINVAL;
2248 		} else {
2249 			if (runtime->boundary + diff > runtime->buffer_size)
2250 				return -EINVAL;
2251 		}
2252 	}
2253 
2254 	runtime->control->appl_ptr = appl_ptr;
2255 	if (substream->ops->ack) {
2256 		ret = substream->ops->ack(substream);
2257 		if (ret < 0) {
2258 			runtime->control->appl_ptr = old_appl_ptr;
2259 			if (ret == -EPIPE)
2260 				__snd_pcm_xrun(substream);
2261 			return ret;
2262 		}
2263 	}
2264 
2265 	trace_applptr(substream, old_appl_ptr, appl_ptr);
2266 
2267 	return 0;
2268 }
2269 
2270 /* the common loop for read/write data */
2271 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2272 				     void *data, bool interleaved,
2273 				     snd_pcm_uframes_t size, bool in_kernel)
2274 {
2275 	struct snd_pcm_runtime *runtime = substream->runtime;
2276 	snd_pcm_uframes_t xfer = 0;
2277 	snd_pcm_uframes_t offset = 0;
2278 	snd_pcm_uframes_t avail;
2279 	pcm_copy_f writer;
2280 	pcm_transfer_f transfer;
2281 	bool nonblock;
2282 	bool is_playback;
2283 	int err;
2284 
2285 	err = pcm_sanity_check(substream);
2286 	if (err < 0)
2287 		return err;
2288 
2289 	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2290 	if (interleaved) {
2291 		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2292 		    runtime->channels > 1)
2293 			return -EINVAL;
2294 		writer = interleaved_copy;
2295 	} else {
2296 		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2297 			return -EINVAL;
2298 		writer = noninterleaved_copy;
2299 	}
2300 
2301 	if (!data) {
2302 		if (is_playback)
2303 			transfer = fill_silence;
2304 		else
2305 			return -EINVAL;
2306 	} else {
2307 		if (substream->ops->copy)
2308 			transfer = substream->ops->copy;
2309 		else
2310 			transfer = is_playback ?
2311 				default_write_copy : default_read_copy;
2312 	}
2313 
2314 	if (size == 0)
2315 		return 0;
2316 
2317 	nonblock = !!(substream->f_flags & O_NONBLOCK);
2318 
2319 	snd_pcm_stream_lock_irq(substream);
2320 	err = pcm_accessible_state(runtime);
2321 	if (err < 0)
2322 		goto _end_unlock;
2323 
2324 	runtime->twake = runtime->control->avail_min ? : 1;
2325 	if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2326 		snd_pcm_update_hw_ptr(substream);
2327 
2328 	/*
2329 	 * If size < start_threshold, wait indefinitely; another
2330 	 * thread may start the capture.
2331 	 */
2332 	if (!is_playback &&
2333 	    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2334 	    size >= runtime->start_threshold) {
2335 		err = snd_pcm_start(substream);
2336 		if (err < 0)
2337 			goto _end_unlock;
2338 	}
2339 
2340 	avail = snd_pcm_avail(substream);
2341 
2342 	while (size > 0) {
2343 		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2344 		snd_pcm_uframes_t cont;
2345 		if (!avail) {
2346 			if (!is_playback &&
2347 			    runtime->state == SNDRV_PCM_STATE_DRAINING) {
2348 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2349 				goto _end_unlock;
2350 			}
2351 			if (nonblock) {
2352 				err = -EAGAIN;
2353 				goto _end_unlock;
2354 			}
2355 			runtime->twake = min_t(snd_pcm_uframes_t, size,
2356 					runtime->control->avail_min ? : 1);
2357 			err = wait_for_avail(substream, &avail);
2358 			if (err < 0)
2359 				goto _end_unlock;
2360 			if (!avail)
2361 				continue; /* draining */
2362 		}
2363 		frames = size > avail ? avail : size;
2364 		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2365 		appl_ofs = appl_ptr % runtime->buffer_size;
2366 		cont = runtime->buffer_size - appl_ofs;
2367 		if (frames > cont)
2368 			frames = cont;
2369 		if (snd_BUG_ON(!frames)) {
2370 			err = -EINVAL;
2371 			goto _end_unlock;
2372 		}
2373 		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2374 			err = -EBUSY;
2375 			goto _end_unlock;
2376 		}
2377 		snd_pcm_stream_unlock_irq(substream);
2378 		if (!is_playback)
2379 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2380 		err = writer(substream, appl_ofs, data, offset, frames,
2381 			     transfer, in_kernel);
2382 		if (is_playback)
2383 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2384 		snd_pcm_stream_lock_irq(substream);
2385 		atomic_dec(&runtime->buffer_accessing);
2386 		if (err < 0)
2387 			goto _end_unlock;
2388 		err = pcm_accessible_state(runtime);
2389 		if (err < 0)
2390 			goto _end_unlock;
2391 		appl_ptr += frames;
2392 		if (appl_ptr >= runtime->boundary)
2393 			appl_ptr -= runtime->boundary;
2394 		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2395 		if (err < 0)
2396 			goto _end_unlock;
2397 
2398 		offset += frames;
2399 		size -= frames;
2400 		xfer += frames;
2401 		avail -= frames;
2402 		if (is_playback &&
2403 		    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2404 		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2405 			err = snd_pcm_start(substream);
2406 			if (err < 0)
2407 				goto _end_unlock;
2408 		}
2409 	}
2410  _end_unlock:
2411 	runtime->twake = 0;
2412 	if (xfer > 0 && err >= 0)
2413 		snd_pcm_update_state(substream, runtime);
2414 	snd_pcm_stream_unlock_irq(substream);
2415 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2416 }
2417 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2418 
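/*
 * Note (illustrative, not part of this file): drivers don't call this
 * directly; the read/write helpers declared in <sound/pcm.h> reduce to
 * thin wrappers around it, roughly:
 *
 *	snd_pcm_lib_write(substream, buf, frames)
 *		-> __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false)
 *	snd_pcm_lib_readv(substream, bufs, frames)
 *		-> __snd_pcm_lib_xfer(substream, bufs, false, frames, false)
 *	snd_pcm_kernel_write(substream, buf, frames)
 *		-> __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, true)
 *
 * i.e. "interleaved" chooses between one linear buffer and a per-channel
 * pointer array, and "in_kernel" chooses kernel-space vs. user-space
 * buffers.
 */
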
2419 /*
2420  * standard channel mapping helpers
2421  */
2422 
2423 /* default channel maps for multi-channel playbacks, up to 8 channels */
2424 const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2425 	{ .channels = 1,
2426 	  .map = { SNDRV_CHMAP_MONO } },
2427 	{ .channels = 2,
2428 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2429 	{ .channels = 4,
2430 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2431 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2432 	{ .channels = 6,
2433 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2434 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2435 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2436 	{ .channels = 8,
2437 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2438 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2439 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2440 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2441 	{ }
2442 };
2443 EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2444 
2445 /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2446 const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2447 	{ .channels = 1,
2448 	  .map = { SNDRV_CHMAP_MONO } },
2449 	{ .channels = 2,
2450 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2451 	{ .channels = 4,
2452 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2453 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2454 	{ .channels = 6,
2455 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2456 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2457 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2458 	{ .channels = 8,
2459 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2460 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2461 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2462 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2463 	{ }
2464 };
2465 EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2466 
2467 static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2468 {
2469 	if (ch > info->max_channels)
2470 		return false;
2471 	return !info->channel_mask || (info->channel_mask & (1U << ch));
2472 }
2473 
2474 static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2475 			      struct snd_ctl_elem_info *uinfo)
2476 {
2477 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2478 
2479 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2480 	uinfo->count = info->max_channels;
2481 	uinfo->value.integer.min = 0;
2482 	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2483 	return 0;
2484 }
2485 
2486 /* get callback for channel map ctl element
2487  * stores the channel positions of the first map matching the current channel count
2488  */
2489 static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2490 			     struct snd_ctl_elem_value *ucontrol)
2491 {
2492 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2493 	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2494 	struct snd_pcm_substream *substream;
2495 	const struct snd_pcm_chmap_elem *map;
2496 
2497 	if (!info->chmap)
2498 		return -EINVAL;
2499 	substream = snd_pcm_chmap_substream(info, idx);
2500 	if (!substream)
2501 		return -ENODEV;
2502 	memset(ucontrol->value.integer.value, 0,
2503 	       sizeof(long) * info->max_channels);
2504 	if (!substream->runtime)
2505 		return 0; /* no channels set */
2506 	for (map = info->chmap; map->channels; map++) {
2507 		int i;
2508 		if (map->channels == substream->runtime->channels &&
2509 		    valid_chmap_channels(info, map->channels)) {
2510 			for (i = 0; i < map->channels; i++)
2511 				ucontrol->value.integer.value[i] = map->map[i];
2512 			return 0;
2513 		}
2514 	}
2515 	return -EINVAL;
2516 }
2517 
2518 /* tlv callback for channel map ctl element
2519  * expands the pre-defined channel maps in the form of TLV
2520  */
2521 static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2522 			     unsigned int size, unsigned int __user *tlv)
2523 {
2524 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2525 	const struct snd_pcm_chmap_elem *map;
2526 	unsigned int __user *dst;
2527 	int c, count = 0;
2528 
2529 	if (!info->chmap)
2530 		return -EINVAL;
2531 	if (size < 8)
2532 		return -ENOMEM;
2533 	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2534 		return -EFAULT;
2535 	size -= 8;
2536 	dst = tlv + 2;
2537 	for (map = info->chmap; map->channels; map++) {
2538 		int chs_bytes = map->channels * 4;
2539 		if (!valid_chmap_channels(info, map->channels))
2540 			continue;
2541 		if (size < 8)
2542 			return -ENOMEM;
2543 		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2544 		    put_user(chs_bytes, dst + 1))
2545 			return -EFAULT;
2546 		dst += 2;
2547 		size -= 8;
2548 		count += 8;
2549 		if (size < chs_bytes)
2550 			return -ENOMEM;
2551 		size -= chs_bytes;
2552 		count += chs_bytes;
2553 		for (c = 0; c < map->channels; c++) {
2554 			if (put_user(map->map[c], dst))
2555 				return -EFAULT;
2556 			dst++;
2557 		}
2558 	}
2559 	if (put_user(count, tlv + 1))
2560 		return -EFAULT;
2561 	return 0;
2562 }
2563 
2564 static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2565 {
2566 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2567 	info->pcm->streams[info->stream].chmap_kctl = NULL;
2568 	kfree(info);
2569 }
2570 
2571 /**
2572  * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2573  * @pcm: the assigned PCM instance
2574  * @stream: stream direction
2575  * @chmap: channel map elements (for query)
2576  * @max_channels: the max number of channels for the stream
2577  * @private_value: the value passed to each kcontrol's private_value field
2578  * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2579  *
2580  * Create channel-mapping control elements assigned to the given PCM stream(s).
2581  * Return: Zero if successful, or a negative error value.
2582  */
2583 int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2584 			   const struct snd_pcm_chmap_elem *chmap,
2585 			   int max_channels,
2586 			   unsigned long private_value,
2587 			   struct snd_pcm_chmap **info_ret)
2588 {
2589 	struct snd_pcm_chmap *info;
2590 	struct snd_kcontrol_new knew = {
2591 		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2592 		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2593 			SNDRV_CTL_ELEM_ACCESS_VOLATILE |
2594 			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2595 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2596 		.info = pcm_chmap_ctl_info,
2597 		.get = pcm_chmap_ctl_get,
2598 		.tlv.c = pcm_chmap_ctl_tlv,
2599 	};
2600 	int err;
2601 
2602 	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2603 		return -EBUSY;
2604 	info = kzalloc(sizeof(*info), GFP_KERNEL);
2605 	if (!info)
2606 		return -ENOMEM;
2607 	info->pcm = pcm;
2608 	info->stream = stream;
2609 	info->chmap = chmap;
2610 	info->max_channels = max_channels;
2611 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2612 		knew.name = "Playback Channel Map";
2613 	else
2614 		knew.name = "Capture Channel Map";
2615 	knew.device = pcm->device;
2616 	knew.count = pcm->streams[stream].substream_count;
2617 	knew.private_value = private_value;
2618 	info->kctl = snd_ctl_new1(&knew, info);
2619 	if (!info->kctl) {
2620 		kfree(info);
2621 		return -ENOMEM;
2622 	}
2623 	info->kctl->private_free = pcm_chmap_ctl_private_free;
2624 	err = snd_ctl_add(pcm->card, info->kctl);
2625 	if (err < 0)
2626 		return err;
2627 	pcm->streams[stream].chmap_kctl = info->kctl;
2628 	if (info_ret)
2629 		*info_ret = info;
2630 	return 0;
2631 }
2632 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
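
/*
 * Usage sketch (illustrative, not part of this file): a driver with an
 * ordinary stereo/multichannel playback stream can register the standard
 * maps defined above while building its PCM device:
 *
 *	struct snd_pcm_chmap *chmap;
 *	int err;
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, &chmap);
 *	if (err < 0)
 *		return err;
 *
 * max_channels (8 here) should match what the hardware actually supports;
 * NULL may be passed instead of &chmap when the caller doesn't need the
 * returned info.
 */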
2633