// -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; -*-
// (c) 2020 Henner Zeller <h.zeller@acm.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation version 2.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://gnu.org/licenses/gpl-2.0.txt>

// TODO: help needed.
// * sound output (platform independently?)

#include "video-display.h"

#include "image-display.h"
#include "timg-time.h"

#include <cstring>    // strlen(), strncmp()
#include <mutex>
#include <strings.h>  // strcasecmp()
#include <thread>
#include <utility>

// libav: "U NO extern C in header ?"
extern "C" {
#  include <libavcodec/avcodec.h>
#  if HAVE_AVDEVICE
#    include <libavdevice/avdevice.h>
#  endif
#  include <libavformat/avformat.h>
#  include <libavutil/imgutils.h>
#  include <libavutil/log.h>
#  include <libswscale/swscale.h>
}

static constexpr bool kDebug = false;

namespace timg {
// Convert deprecated color formats to their new equivalents and manually set
// the color range. YUV has funny ranges (16-235), while YUVJ uses 0-255.
// SWS prefers to deal with the YUV range, but then requires the output range
// to be set explicitly.
// https://libav.org/documentation/doxygen/master/pixfmt_8h.html#a9a8e335cf3be472042bc9f0cf80cd4c5
static SwsContext *CreateSWSContext(const AVCodecContext *codec_ctx,
                                    int display_width, int display_height) {
    AVPixelFormat src_pix_fmt;
    bool src_range_extended_yuvj = true;
    // Remap deprecated to new pixel format.
    switch (codec_ctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ420P: src_pix_fmt = AV_PIX_FMT_YUV420P; break;
    case AV_PIX_FMT_YUVJ422P: src_pix_fmt = AV_PIX_FMT_YUV422P; break;
    case AV_PIX_FMT_YUVJ444P: src_pix_fmt = AV_PIX_FMT_YUV444P; break;
    case AV_PIX_FMT_YUVJ440P: src_pix_fmt = AV_PIX_FMT_YUV440P; break;
    default:
        src_range_extended_yuvj = false;
        src_pix_fmt = codec_ctx->pix_fmt;
    }
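    // Create the scaler: converts from the codec's native resolution and
    // (remapped) pixel format to RGBA at the target display size, using
    // bilinear filtering.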
    SwsContext *swsCtx = sws_getContext(codec_ctx->width, codec_ctx->height,
                                        src_pix_fmt,
                                        display_width, display_height,
                                        AV_PIX_FMT_RGBA,
                                        SWS_BILINEAR, NULL, NULL, NULL);
    if (src_range_extended_yuvj) {
        // Manually set the source range to be extended. Read-modify-write.
        int dontcare[4];
        int src_range, dst_range;
        int brightness, contrast, saturation;
        sws_getColorspaceDetails(swsCtx, (int**)&dontcare, &src_range,
                                 (int**)&dontcare, &dst_range, &brightness,
                                 &contrast, &saturation);
        const int* coefs = sws_getCoefficients(SWS_CS_DEFAULT);
        src_range = 1;  // New src range.
        sws_setColorspaceDetails(swsCtx, coefs, src_range, coefs, dst_range,
                                 brightness, contrast, saturation);
    }
    return swsCtx;
}

static void dummy_log(void *, int, const char *, va_list) {
    // Let's not disturb our terminal with messages from here.
    // Maybe add logging to a separate stream later.
}

static void OnceInitialize() {
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100)
    av_register_all();
#endif
#if HAVE_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();
    av_log_set_callback(dummy_log);
}

VideoLoader::VideoLoader(const std::string &filename) : ImageSource(filename) {
    static std::once_flag init;
    std::call_once(init, OnceInitialize);
}

VideoLoader::~VideoLoader() {
    avcodec_close(codec_context_);
    sws_freeContext(sws_context_);
    avformat_close_input(&format_context_);
    delete terminal_fb_;
}

const char *VideoLoader::VersionInfo() {
    return "libav " AV_STRINGIFY(LIBAVFORMAT_VERSION);
}

bool VideoLoader::LoadAndScale(const DisplayOptions &display_options,
                               int frame_offset, int frame_count) {
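    // frame_offset: number of initial frames to skip; frame_count: maximum
    // number of frames to emit afterwards (<= 0 means no limit).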
    options_ = display_options;
    frame_offset_ = frame_offset;
    frame_count_ = frame_count;

    const char *file = (filename() == "-")
        ? "/dev/stdin"
        : filename().c_str();
    // Only consider applying transparency for file types in which we know
    // it might be present.
    for (const char *ending : { ".png", ".gif", "/dev/stdin" }) {
        if (strlen(file) >= strlen(ending) &&
            strcasecmp(file + strlen(file) - strlen(ending), ending) == 0) {
            maybe_transparent_ = true;
            break;
        }
    }

    format_context_ = avformat_alloc_context();
    int ret;
    if ((ret = avformat_open_input(&format_context_, file, NULL, NULL)) != 0) {
        char msg[100];
        av_strerror(ret, msg, sizeof(msg));
        if (kDebug) fprintf(stderr, "%s: %s\n", file, msg);
#if not HAVE_AVDEVICE
        // Video device support not compiled in. Try to give a helpful message.
        if (strncmp(file, "/dev/video", strlen("/dev/video")) == 0) {
            fprintf(stderr, "Need to compile with -DWITH_VIDEO_DEVICE=On to "
                    "access v4l2 device\n");
        }
#endif
        return false;
    }

    if (avformat_find_stream_info(format_context_, NULL) < 0) {
        if (kDebug) fprintf(stderr, "Couldn't find stream information\n");
        return false;
    }

    // Find the first video stream
    AVCodecParameters *codec_parameters = nullptr;
    AVCodec *av_codec = nullptr;
    for (int i = 0; i < (int)format_context_->nb_streams; ++i) {
        codec_parameters = format_context_->streams[i]->codecpar;
        av_codec = avcodec_find_decoder(codec_parameters->codec_id);
        if (!av_codec) continue;
        if (codec_parameters->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index_ = i;
            break;
        }
    }
    if (video_stream_index_ == -1)
        return false;

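    // Duration of a single frame: av_guess_frame_rate() returns frames per
    // second as a rational, so one frame lasts den/num seconds.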
    auto stream = format_context_->streams[video_stream_index_];
    AVRational rate = av_guess_frame_rate(format_context_, stream, nullptr);
    frame_duration_ = Duration::Nanos(1e9 * rate.den / rate.num);

    codec_context_ = avcodec_alloc_context3(av_codec);
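    // Use frame-level threading if the codec supports it and more than one
    // CPU is available, capping the decoder at four threads.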
    if (av_codec->capabilities & AV_CODEC_CAP_FRAME_THREADS &&
        std::thread::hardware_concurrency() > 1) {
        codec_context_->thread_type = FF_THREAD_FRAME;
        codec_context_->thread_count =
            std::min(4, (int)std::thread::hardware_concurrency());
    }
    if (avcodec_parameters_to_context(codec_context_, codec_parameters) < 0)
        return false;
    if (avcodec_open2(codec_context_, av_codec, NULL) < 0
        || codec_context_->width <= 0 || codec_context_->height <= 0)
        return false;

    /*
     * Prepare the frame that holds the scaled target frame to be sent to
     * the terminal.
     */
    int target_width = 0;
    int target_height = 0;

    // Make display fit within canvas using the timg scaling utility.
    DisplayOptions opts(display_options);
    // Make sure we don't confuse users: some image URLs actually end up here,
    // so make it clear that certain options won't work.
    // TODO: this is a crude work-around. While we tell the user what to do,
    // it would be better if we dealt with it here already.
    if (opts.crop_border != 0 || opts.auto_crop) {
        const bool is_url = (strncmp(file, "http://", 7) == 0 ||
                             strncmp(file, "https://", 8) == 0);
        fprintf(stderr, "%s%s is handled by the video subsystem. "
                "Unfortunately, no auto-crop feature is implemented there.\n",
                is_url ? "URL " : "", file);
        if (is_url) {
            fprintf(stderr, "Use:\n\twget -qO- %s | timg -T%d -\n... instead "
                    "for this to work\n", file, opts.crop_border);
        }
    }
    opts.fill_height = false;  // This only makes sense for horizontal scroll.
    CalcScaleToFitDisplay(codec_context_->width, codec_context_->height,
                          opts, false, &target_width, &target_height);

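    // Horizontal offset needed to center a narrower frame within the
    // available display width.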
    if (display_options.center_horizontally) {
        center_indentation_ = (display_options.width - target_width)/2;
    }
    // Initialize SWS context for software scaling.
    sws_context_ = CreateSWSContext(codec_context_,
                                    target_width, target_height);
    if (!sws_context_) {
        if (kDebug) fprintf(stderr, "Trouble doing scaling to %dx%d :(\n",
                            opts.width, opts.height);
        return false;
    }

    // Framebuffer to interface with the timg TerminalCanvas.
    terminal_fb_ = new timg::Framebuffer(target_width, target_height);
    return true;
}

void VideoLoader::AlphaBlendFramebuffer() {
    if (!maybe_transparent_) return;
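    // Compose the (possibly transparent) frame onto the configured background
    // color or checkerboard pattern; the pattern dimensions are derived from
    // the terminal's cell size in pixels.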
    terminal_fb_->AlphaComposeBackground(
        options_.bgcolor_getter,
        options_.bg_pattern_color,
        options_.pattern_size * options_.cell_x_px,
        options_.pattern_size * options_.cell_y_px/2);
}

void VideoLoader::SendFrames(Duration duration, int loops,
                             const volatile sig_atomic_t &interrupt_received,
                             const Renderer::WriteFramebufferFun &sink) {
    const bool frame_limit = (frame_count_ > 0);

    if (frame_count_ == 1)  // If there is only one frame, nothing to repeat.
        loops = 1;

    // Unlike animated images, where an unset loop value means 'infinite'
    // repeat, it feels more sensible to show videos exactly once in that
    // case. An explicitly negative value is still considered 'forever'.
    const bool animated_png = filename().size() > 3
        && (strcasecmp(filename().c_str() + filename().size() - 3, "png") == 0);
    const bool loop_forever = (loops < 0) &&
        (loops != timg::kNotInitialized || animated_png);

    if (loops == timg::kNotInitialized && !animated_png)
        loops = 1;

    AVPacket *packet = av_packet_alloc();
    bool is_first = true;
    timg::Duration time_from_first_frame;

    // We made guesses above whether something is potentially an animation,
    // but we only know for sure once we observe how many frames there are.
    // So we only loop if we do not observe exactly one frame.
    int observed_frame_count = 0;

    AVFrame *decode_frame = av_frame_alloc();  // Decode video into this
    for (int k = 0;
         ((loop_forever || k < loops) && observed_frame_count != 1)
             && !interrupt_received
             && time_from_first_frame < duration;
         ++k) {
        if (k > 0) {
            // Rewind unless we're just starting.
            av_seek_frame(format_context_, video_stream_index_, 0,
                          AVSEEK_FLAG_ANY);
            avcodec_flush_buffers(codec_context_);
        }
        observed_frame_count = 0;
        int remaining_frames = frame_count_;
        int skip_offset = frame_offset_;
        int decode_in_flight = 0;

        bool state_reading = true;

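        // Demux/decode loop: while in the 'reading' state, feed packets of
        // the video stream to the decoder. Once the input runs out, send a
        // nullptr packet to switch the decoder into draining mode and keep
        // receiving frames until nothing is left in flight.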
        while (!interrupt_received && time_from_first_frame < duration
               && (!frame_limit || remaining_frames > 0)) {

            if (state_reading &&
                av_read_frame(format_context_, packet) != 0) {
                state_reading = false;  // Ran out of packets from input
            }

            if (!state_reading && decode_in_flight == 0)
                break;     // Decoder fully drained.

            if (state_reading && packet->stream_index != video_stream_index_) {
                av_packet_unref(packet);
                continue;  // Not a packet we're interested in
            }

            if (state_reading) {
                if (avcodec_send_packet(codec_context_, packet) == 0) {
                    ++decode_in_flight;
                }
                av_packet_unref(packet);
            } else {
                avcodec_send_packet(codec_context_, nullptr);  // Trigger drain
            }

            while (decode_in_flight &&
                   avcodec_receive_frame(codec_context_, decode_frame) == 0) {
                --decode_in_flight;
                if (skip_offset > 0) {
                    // TODO: there is probably a faster/better way to skip
                    // ahead to the last keyframe first.
                    --skip_offset;
                    continue;
                }

                time_from_first_frame.Add(frame_duration_);
                // TODO: when frame skipping is enabled, avoid this step if
                // we're falling behind.
                sws_scale(sws_context_,
                          decode_frame->data, decode_frame->linesize,
                          0, codec_context_->height,
                          terminal_fb_->row_data(),
                          terminal_fb_->stride());
                AlphaBlendFramebuffer();
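                // After the first frame, a negative y offset tells the sink
                // to move back up by the frame height, so the next frame
                // overwrites the previous one in place.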
                const int dy = is_first ? 0 : -terminal_fb_->height();
                sink(center_indentation_, dy, *terminal_fb_,
                     is_first
                     ? SeqType::StartOfAnimation
                     : SeqType::AnimationFrame,
                     time_from_first_frame);
                is_first = false;
                if (frame_limit) --remaining_frames;
                ++observed_frame_count;
            }
        }
    }

    av_frame_free(&decode_frame);
    av_packet_free(&packet);
}

}  // namespace timg