/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */

/*
    Rosegarden
    A MIDI and audio sequencer and musical notation editor.
    Copyright 2000-2021 the Rosegarden development team.

    Other copyrights also apply to some parts of this work.  Please
    see the AUTHORS file and individual file headers for details.

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 2 of the
    License, or (at your option) any later version.  See the file
    COPYING included with this distribution for more information.
*/
17 
18 #define RG_MODULE_STRING "[AudioPreviewPainter]"
19 
20 #include "AudioPreviewPainter.h"
21 
22 #include "CompositionModelImpl.h"
23 #include "CompositionColourCache.h"
24 #include "base/Composition.h"
25 #include "base/Track.h"
26 #include "base/AudioLevel.h"
27 #include "base/Studio.h"
28 #include "misc/Debug.h"
29 #include "misc/ConfigGroups.h"
30 #include "gui/application/RosegardenMainWindow.h"
31 
32 #include <QImage>
33 #include <QApplication>
34 #include <QSettings>
35 #if (QT_VERSION >= QT_VERSION_CHECK(5, 14, 0))
36 #include <QScreen>
37 #else
38 #include <QDesktopWidget>
39 #endif
40 
41 namespace Rosegarden {
42 
/// Prepare an indexed-colour working image sized for the segment's on-screen
/// rect (capped at one tile's width) into which paintPreviewImage() will draw.
/// The painter does not take ownership of apData or segment.
AudioPreviewPainter::AudioPreviewPainter(CompositionModelImpl& model,
					 CompositionModelImpl::AudioPeaks* apData,
					 const Composition &composition,
					 const Segment* segment)
    : m_model(model),
      m_apData(apData),
      m_composition(composition),
      m_segment(segment),
      m_rect(),
      m_defaultCol(CompositionColourCache::getInstance()->SegmentAudioPreview),
      m_height(model.grid().getYSnap()/2)  // half a track row: one preview half-wave
{
    // Fetch the segment's rectangle in composition coordinates into m_rect.
    model.getSegmentRect(*m_segment, m_rect);

    // Never allocate an image wider than one tile; wider segments are
    // rendered as multiple slices by paintPreviewImage().
    int pixWidth = std::min(m_rect.baseWidth, tileWidth());

    //NB. m_image used to be created as an 8-bit image with 4 bits per pixel.
    // QImage::Format_Indexed8 seems to be close enough, since we manipulate the
    // pixels directly by index, rather than employ drawing tools.
    m_image = QImage(pixWidth, m_rect.rect.height(), QImage::Format_Indexed8);
    m_penWidth = (std::max(1U, (unsigned int)m_rect.pen.width()) * 2);
    // Vertical headroom for each half of the waveform, leaving room for the
    // segment border pen plus a 2px margin.
    m_halfRectHeight = m_model.grid().getYSnap()/2 - m_penWidth / 2 - 2;
}
66 
tileWidth()67 int AudioPreviewPainter::tileWidth()
68 {
69     static int tw = -1;
70     // Cached value available?  Return it.
71     if (tw != -1)
72         return tw;
73 
74 #if (QT_VERSION >= QT_VERSION_CHECK(5, 14, 0))
75     QScreen* screen = RosegardenMainWindow::self()->screen();
76     tw = screen->availableGeometry().width();
77 #else
78     tw = QApplication::desktop()->width();
79 #endif
80     return tw;
81 }
82 
/// Render the audio peaks in m_apData into one or more indexed-colour image
/// slices (each at most tileWidth() wide), applying the instrument's fader
/// level and pan, and accumulating finished slices via finalizeCurrentSlice().
void AudioPreviewPainter::paintPreviewImage()
{
    const CompositionModelImpl::AudioPeaks::Values &values =
            m_apData->values;

    // Nothing to draw.
    if (values.empty())
        return;

    // Per-channel gain multipliers; unity unless an instrument is found below.
    float gain[2] = { 1.0, 1.0 };
    int instrumentChannels = 2;
    TrackId trackId = m_segment->getTrack();
    Track *track = m_model.getComposition().getTrackById(trackId);
    if (track) {
        Instrument *instrument = m_model.getStudio().getInstrumentById(track->getInstrument());
        if (instrument) {
            float level = AudioLevel::dB_to_multiplier(instrument->getLevel());
            // Re-centre pan so 0 is centre.  Positive pan attenuates
            // gain[0], negative pan attenuates gain[1].
            float pan = instrument->getPan() - 100.0;
            gain[0] = level * ((pan > 0.0) ? (1.0 - (pan / 100.0)) : 1.0);
            gain[1] = level * ((pan < 0.0) ? ((pan + 100.0) / 100.0) : 1.0);
	    instrumentChannels = instrument->getAudioChannels();
        }
    }

    // This was always false.
    bool showMinima = false;  //m_apData->showsMinima();

    unsigned int channels = m_apData->channels;
    if (channels == 0) {
        RG_WARNING << "paintPreviewImage(): WARNING: problem with audio file for segment " << m_segment->getLabel().c_str();
        return;
    }

    // Number of peak points per channel (a point would need two values per
    // channel if minima were shown).
    int samplePoints = int(values.size()) / (channels * (showMinima ? 2 : 1));
    float h1, h2, l1 = 0, l2 = 0;
    // Peak points per pixel of segment width (no-tempo-change case).
    double sampleScaleFactor = samplePoints / double(m_rect.baseWidth);
    m_sliceNb = 0;

    initializeNewSlice();

    // Vertical midline of the image; the two half-waves are drawn up and
    // down from here.
    int centre = m_image.height() / 2;

    //RG_DEBUG << "paintPreviewImage(): width = " << m_rect.baseWidth << ", height = " << m_rect.rect.height() << ", halfRectHeight = " << m_halfRectHeight;
    //RG_DEBUG << "paintPreviewImage(): channels = " << channels << ", gain left = " << gain[0] << ", right = " << gain[1];

    // double audioDuration = double(m_segment->getAudioEndTime().sec) +
    //     double(m_segment->getAudioEndTime().nsec) / 1000000000.0;

    // We need to take each pixel value and map it onto a point within
    // the preview.  We have samplePoints preview points in a known
    // duration of audioDuration.  Thus each point spans a real time
    // of audioDuration / samplePoints.  We need to convert the
    // accumulated real time back into musical time, and map this
    // proportionately across the segment width.

    RealTime startRT =
	m_model.getComposition().getElapsedRealTime(m_segment->getStartTime());
    double startTime = double(startRT.sec) + double(startRT.nsec) / 1000000000.0;

    RealTime endRT =
	m_model.getComposition().getElapsedRealTime(m_segment->getEndMarkerTime());
    double endTime = double(endRT.sec) + double(endRT.nsec) / 1000000000.0;

    // Does any tempo change fall within the segment?  If so, the simple
    // linear pixel->peak mapping is wrong and we must go through musical
    // time for each pixel.
    bool haveTempoChange = false;

    int finalTempoChangeNumber =
	m_model.getComposition().getTempoChangeNumberAt
	(m_segment->getEndMarkerTime());

    if ((finalTempoChangeNumber >= 0) &&

	(finalTempoChangeNumber >
	 m_model.getComposition().getTempoChangeNumberAt
	 (m_segment->getStartTime()))) {

	haveTempoChange = true;
    }

    QSettings settings;
    settings.beginGroup( GeneralOptionsConfigGroup );

    // "Meter" style scales amplitudes like a level meter rather than
    // linearly; it is the default (1).
    bool meterLevels = (settings.value("audiopreviewstyle", 1).toUInt()
			== 1);

    for (int i = 0; i < m_rect.baseWidth; ++i) {

	// i is the x coordinate within the rectangle.  We need to
	// calculate the position within the audio preview from which
	// to draw the peak for this coordinate.  It's possible there
	// may be more than one, in which case we need to find the
	// peak of all of them.

	// Index into the interleaved values array for this pixel.
	int position = 0;

	if (haveTempoChange) {

	    // First find the time corresponding to this i.
	    timeT musicalTime =
		m_model.grid().getRulerScale()->getTimeForX(m_rect.rect.x() + i);
	    RealTime realTime =
		m_model.getComposition().getElapsedRealTime(musicalTime);

	    double time = double(realTime.sec) +
		double(realTime.nsec) / 1000000000.0;
	    double offset = time - startTime;

	    if (endTime > startTime) {
		position = offset * m_rect.baseWidth / (endTime - startTime);
		position = int(channels * position);
	    }

	} else {

	    // Constant tempo: linear mapping from pixel to peak point.
	    position = int(channels * i * sampleScaleFactor);
	}

        if (position < 0) continue;

        // Past the last complete frame of peak values: flush what we have
        // and stop.
        if (position >= int(values.size()) - int(channels)) {
            finalizeCurrentSlice();
            break;
        }

        if (channels == 1) {

            // Mono source: duplicate the single peak into both halves.
            h1 = values[position++];
            h2 = h1;

            if (showMinima) {
                l1 = values[position++];
                l2 = l1;
            }
        } else {

            h1 = values[position++];
            if (showMinima) l1 = values[position++];

            h2 = values[position++];
            if (showMinima) l2 = values[position++];

        }

	// Stereo file on a mono instrument: collapse to the mono mix.
	if (instrumentChannels == 1 && channels == 2) {
	    h1 = h2 = (h1 + h2) / 2;
	    l1 = l2 = (l1 + l2) / 2;
	}

	// Apply fader/pan gain computed above.
	h1 *= gain[0];
	h2 *= gain[1];

	l1 *= gain[0];
	l2 *= gain[1];

        // int width = 1;
	// Palette index to draw with: 1 = preview colour, 2 = red
	// (clipping); see initializeNewSlice().
	int pixel;

        // h1 left, h2 right
        if (h1 >= 1.0) { h1 = 1.0; pixel = 2; }
	else { pixel = 1; }

        int h;

	if (meterLevels) {
	    h = AudioLevel::multiplier_to_preview(h1, m_height);
	} else {
	    h = h1 * m_height;
	}
        // NOTE(review): the upper half is forced to at least 1px and capped
        // at m_halfRectHeight, while the lower half (h2) below only clamps
        // negatives to 0 and has no cap — presumably intentional, but worth
        // confirming.
        if (h <= 0) h = 1;
	if (h > m_halfRectHeight) h = m_halfRectHeight;

        // x within the current slice image.
        int rectX = i % tileWidth();

	// Draw the upper half-wave from the centre line upwards.
	for (int py = 0; py < h; ++py) {
	    m_image.setPixel(rectX, centre - py, pixel);
	}

        if (h2 >= 1.0) { h2 = 1.0; pixel = 2; }
        else { pixel = 1; }

	if (meterLevels) {
	    h = AudioLevel::multiplier_to_preview(h2, m_height);
	} else {
	    h = h2 * m_height;
	}
        if (h < 0) h = 0;

	// Draw the lower half-wave from the centre line downwards.
	for (int py = 0; py < h; ++py) {
	    m_image.setPixel(rectX, centre + py, pixel);
	}

        // Tile boundary (or final pixel): store this slice and start a
        // fresh one.
        if (((i+1) % tileWidth()) == 0 || i == (m_rect.baseWidth - 1)) {
            finalizeCurrentSlice();
            initializeNewSlice();
        }
    }

/* Auto-fade not yet implemented.

    if (m_segment->isAutoFading()) {

        Composition &comp = m_model.getComposition();

        int audioFadeInEnd = int(
                                 m_model.grid().getRulerScale()->getXForTime(comp.
                                                                     getElapsedTimeForRealTime(m_segment->getFadeInTime()) +
                                                                     m_segment->getStartTime()) -
                                 m_model.grid().getRulerScale()->getXForTime(m_segment->getStartTime()));

        m_p.setPen(QColor(Qt::blue));
        m_p.drawRect(0,  m_apData->getSegmentRect().height() - 1, audioFadeInEnd, 1);
        m_pb.drawRect(0, m_apData->getSegmentRect().height() - 1, audioFadeInEnd, 1);
    }

    m_p.end();
    m_pb.end();
*/

    settings.endGroup();
}
301 
initializeNewSlice()302 void AudioPreviewPainter::initializeNewSlice()
303 {
304     // transparent background
305     m_image.setColor(0, qRgba(255, 255, 255, 0));
306 
307     // foreground from getPreviewColour()
308     QColor c = m_segment->getPreviewColour();
309     QRgb rgba = qRgba(c.red(), c.green(), c.blue(), 255);
310     m_image.setColor(1, rgba);
311 
312     // red for clipping
313     m_image.setColor(2, qRgba(255, 0, 0, 255));
314 
315     m_image.fill(0);
316 }
317 
finalizeCurrentSlice()318 void AudioPreviewPainter::finalizeCurrentSlice()
319 {
320 //     RG_DEBUG << "AudioPreviewPainter::finalizeCurrentSlice : copying pixmap to image at " << m_sliceNb * tileWidth();
321 
322     m_previewPixmaps.push_back(m_image.copy());
323     ++m_sliceNb;
324 }
325 
/// Return (by value) the slices accumulated by paintPreviewImage().
CompositionModelImpl::QImageVector AudioPreviewPainter::getPreviewImage()
{
    return m_previewPixmaps;
}
330 
331 }
332