/* This file is part of the Pangolin Project.
 * http://github.com/stevenlovegrove/Pangolin
 *
 * Copyright (c) 2011 Steven Lovegrove
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#pragma once

// Pangolin video supports various cameras and file formats through
// different 3rd party libraries.
//
// Video URIs take the following form:
//  scheme:[param1=value1,param2=value2,...]//device
//
// scheme = file | files | pango | shmem | dc1394 | uvc | v4l | openni2 |
//          openni | depthsense | pleora | teli | mjpeg | test |
//          thread | convert | debayer | split | join | shift | mirror | unpack
//
// file/files - read one or more streams from image file(s) / video
//  e.g. "files://~/data/dataset/img_*.jpg"
//  e.g. "files://~/data/dataset/img_[left,right]_*.pgm"
//  e.g. "files:///home/user/sequence/foo%03d.jpeg"
//
//  e.g. "file:[fmt=GRAY8,size=640x480]///home/user/raw_image.bin"
//  e.g. "file:[realtime=1]///home/user/video/movie.pango"
//  e.g. "file:[stream=1]///home/user/video/movie.avi"
//
// dc1394 - capture video through a FireWire camera
//  e.g. "dc1394:[fmt=RGB24,size=640x480,fps=30,iso=400,dma=10]//0"
//  e.g. "dc1394:[fmt=FORMAT7_1,size=640x480,pos=2+2,iso=400,dma=10]//0"
//  e.g. "dc1394:[fmt=FORMAT7_3,deinterlace=1]//0"
//
// v4l - capture video from a Video4Linux (USB) camera (normally YUYV 4:2:2 format)
//           method=mmap|read|userptr
//  e.g. "v4l:///dev/video0"
//  e.g. "v4l:[method=mmap]///dev/video0"
//
// openni2 - capture video / depth from the OpenNI2 SDK (Kinect / Xtion etc.)
//           imgN=grey|rgb|ir|ir8|ir24|depth|reg_depth
//  e.g. "openni2://"
//  e.g. "openni2:[img1=rgb,img2=depth,coloursync=true]//"
//  e.g. "openni2:[img1=depth,close=closerange,holefilter=true]//"
//  e.g. "openni2:[size=320x240,fps=60,img1=ir]//"
//
// openni - capture video / depth from the OpenNI 1.0 SDK (Kinect / Xtion etc.)
//           Sensor modes containing '8' will truncate to 8-bits.
//           Sensor modes containing '+' explicitly enable the IR illuminator.
//           imgN=rgb|ir|ir8|ir+|ir8+|depth|reg_depth
//           autoexposure=true|false
//  e.g. "openni://"
//  e.g. "openni:[img1=rgb,img2=depth]//"
//  e.g. "openni:[size=320x240,fps=60,img1=ir]//"
//
// depthsense - capture video / depth from the DepthSense SDK.
//              DepthSenseViewer can be used to alter capture settings.
//              imgN=depth|rgb
//              sizeN=QVGA|320x240|...
//              fpsN=25|30|60|...
//  e.g. "depthsense://"
//  e.g. "depthsense:[img1=depth,img2=rgb]//"
//
// pleora - capture from USB3 Vision cameras via Pleora's eBUS SDK; accepts any option in the same format reported by eBUSPlayer
//  e.g. for Lightwise cameras: "pleora:[size=512x256,pos=712x512,sn=00000274,ExposureTime=10000,PixelFormat=Mono12p,AcquisitionMode=SingleFrame,TriggerSource=Line0,TriggerMode=On]//"
//  e.g. for Toshiba cameras: "pleora:[size=512x256,pos=712x512,sn=0300056,PixelSize=Bpp12,ExposureTime=10000,ImageFormatSelector=Format1,BinningHorizontal=2,BinningVertical=2]//"
//  e.g. Toshiba, alternating sequential shutter: "pleora:[UserSetSelector=UserSet1,ExposureTime=10000,PixelSize=Bpp12,Width=1400,OffsetX=0,Height=1800,OffsetY=124,LineSelector=Line1,LineSource=ExposureActive,LineSelector=Line2,LineSource=Off,LineModeAll=6,LineInverterAll=6,UserSetSave=Execute,
//                                   UserSetSelector=UserSet2,PixelSize=Bpp12,Width=1400,OffsetX=1048,Height=1800,OffsetY=124,ExposureTime=10000,LineSelector=Line1,LineSource=Off,LineSelector=Line2,LineSource=ExposureActive,LineModeAll=6,LineInverterAll=6,UserSetSave=Execute,
//                                   SequentialShutterIndex=1,SequentialShutterEntry=1,SequentialShutterIndex=2,SequentialShutterEntry=2,SequentialShutterTerminateAt=2,SequentialShutterEnable=On,AcquisitionFrameRateControl=Manual,AcquisitionFrameRate=70]//"
//
// thread - runs a thread that continuously pulls from the child stream, so that data input, unpacking, debayering etc. can be decoupled from the main application thread
//  e.g. thread://pleora://
//  e.g. thread://unpack://pleora:[PixelFormat=Mono12p]//
//
// convert - use FFMPEG to convert between video pixel formats
//  e.g. "convert:[fmt=RGB24]//v4l:///dev/video0"
//  e.g. "convert:[fmt=GRAY8]//v4l:///dev/video0"
//
// mjpeg - capture from a (possibly networked) motion JPEG stream using FFMPEG
//  e.g. "mjpeg://http://127.0.0.1/?action=stream"
//
// debayer - debayer an input video stream
//  e.g.  "debayer:[tile=BGGR,method=downsample]//v4l:///dev/video0"
//
// split - split an input video into one or more streams based on a Region of Interest / memory specification
//           roiN=X+Y+WxH
//           memN=Offset:WxH:PitchBytes:Format
//  e.g. "split:[roi1=0+0+640x480,roi2=640+0+640x480]//files:///home/user/sequence/foo%03d.jpeg"
//  e.g. "split:[mem1=307200:640x480:1280:GRAY8,roi2=640+0+640x480]//files:///home/user/sequence/foo%03d.jpeg"
//  e.g. "split:[stream1=2,stream2=1]//pango://video.pango"
//
// truncate - select a sub-sequence of frames based on begin and end indices (end is last index + 1)
//  e.g. Generate 30 random frames: "truncate:[end=30]//test://"
//  e.g. "truncate:[begin=100,end=120]//pango://video.pango"
//
// join - join several child streams into a single multi-stream video, optionally synchronising by timestamp
//  e.g. "join:[sync_tolerance_us=100, sync_continuously=true]//{pleora:[sn=00000274]//}{pleora:[sn=00000275]//}"
//
// test - output a synthetic test video sequence
//  e.g. "test://"
//  e.g. "test:[size=640x480,fmt=RGB24]//"
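//
// Filters compose by nesting URIs, so a complete capture pipeline can be
// declared in a single string. An illustrative composition (the device path
// is a placeholder): capture from V4L on a worker thread and convert the
// result to RGB24:
//
//  e.g. "thread://convert:[fmt=RGB24]//v4l:///dev/video0"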
#include <pangolin/utils/uri.h>
#include <pangolin/video/video_exception.h>
#include <pangolin/video/video_interface.h>
#include <pangolin/video/video_output_interface.h>

namespace pangolin
{

//! Open Video Interface from string specification (as described in this file's header)
PANGOLIN_EXPORT
std::unique_ptr<VideoInterface> OpenVideo(const std::string& uri);

//! Open Video Interface from Uri specification
PANGOLIN_EXPORT
std::unique_ptr<VideoInterface> OpenVideo(const Uri& uri);
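
// A minimal usage sketch (illustrative, not part of the API): open the
// synthetic "test" driver and pump frames into a CPU-side buffer. Error
// handling via VideoException is omitted for brevity.
//
//   std::unique_ptr<VideoInterface> video = OpenVideo("test://");
//   std::vector<unsigned char> buffer(video->SizeBytes());
//   video->Start();
//   while(video->GrabNext(buffer.data(), true)) {
//       // buffer now holds one packed frame per stream, laid out per video->Streams()
//   }
//   video->Stop();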

//! Open VideoOutput Interface from string specification (as described in this file's header)
PANGOLIN_EXPORT
std::unique_ptr<VideoOutputInterface> OpenVideoOutput(const std::string& str_uri);

//! Open VideoOutput Interface from Uri specification
PANGOLIN_EXPORT
std::unique_ptr<VideoOutputInterface> OpenVideoOutput(const Uri& uri);

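// A matching output sketch (hedged; builds on the input loop above): record
// the incoming stream layout and raw bytes into a .pango log.
//
//   std::unique_ptr<VideoOutputInterface> out = OpenVideoOutput("pango://log.pango");
//   out->SetStreams(video->Streams());
//   out->WriteStreams(buffer.data());
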
//! Create vector of matching interfaces either through direct cast or filter interface.
template<typename T>
std::vector<T*> FindMatchingVideoInterfaces( VideoInterface& video )
{
    std::vector<T*> matches;

    T* vid = dynamic_cast<T*>(&video);
    if(vid) {
        matches.push_back(vid);
    }

    VideoFilterInterface* vidf = dynamic_cast<VideoFilterInterface*>(&video);
    if(vidf) {
        std::vector<T*> fmatches = vidf->FindMatchingStreams<T>();
        matches.insert(matches.begin(), fmatches.begin(), fmatches.end());
    }

    return matches;
}

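// Usage sketch (illustrative): VideoPlaybackInterface is one concrete
// sub-interface that may be buried inside a filter chain; any
// dynamic_cast-able type works here.
//
//   std::vector<VideoPlaybackInterface*> seekable =
//       FindMatchingVideoInterfaces<VideoPlaybackInterface>(*video);
//   for(VideoPlaybackInterface* p : seekable) {
//       p->Seek(0); // rewind every seekable source in the pipeline
//   }
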
//! Return the first matching interface, found through direct cast or the filter interface, or nullptr if none matches.
template<typename T>
T* FindFirstMatchingVideoInterface( VideoInterface& video )
{
    T* vid = dynamic_cast<T*>(&video);
    if(vid) {
        return vid;
    }

    VideoFilterInterface* vidf = dynamic_cast<VideoFilterInterface*>(&video);
    if(vidf) {
        std::vector<T*> fmatches = vidf->FindMatchingStreams<T>();
        if(fmatches.size()) {
            return fmatches[0];
        }
    }

    return nullptr;
}

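// Usage sketch (illustrative): grab just the first seekable source, if any.
//
//   if(VideoPlaybackInterface* playback =
//          FindFirstMatchingVideoInterface<VideoPlaybackInterface>(*video)) {
//       playback->Seek(100); // jump straight to frame 100
//   }
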
//! Recursively gather per-frame metadata (as JSON) from a video and, for filters, its inputs.
inline
picojson::value GetVideoFrameProperties(VideoInterface* video)
{
    VideoPropertiesInterface* pi = dynamic_cast<VideoPropertiesInterface*>(video);
    VideoFilterInterface* fi = dynamic_cast<VideoFilterInterface*>(video);

    if(pi) {
        return pi->FrameProperties();
    }else if(fi){
        if(fi->InputStreams().size() == 1) {
            return GetVideoFrameProperties(fi->InputStreams()[0]);
        }else if(fi->InputStreams().size() > 0){
            picojson::value streams;

            for(size_t i=0; i< fi->InputStreams().size(); ++i) {
                const picojson::value dev_props = GetVideoFrameProperties(fi->InputStreams()[i]);
                if(dev_props.contains("streams")) {
                    const picojson::value& dev_streams = dev_props["streams"];
                    for(size_t j=0; j < dev_streams.size(); ++j) {
                        streams.push_back(dev_streams[j]);
                    }
                }else{
                    streams.push_back(dev_props);
                }
            }

            if(streams.size() > 1) {
                picojson::value json = streams[0];
                json["streams"] = streams;
                return json;
            }else{
                return streams[0];
            }
        }
    }
    return picojson::value();
}

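// Usage sketch: dump the metadata attached to the most recent frame. The
// fields present depend on the driver (timestamps, gain, exposure, ...);
// serialize(true) pretty-prints the bundled picojson value.
//
//   picojson::value props = GetVideoFrameProperties(video.get());
//   std::cout << props.serialize(true) << std::endl;
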
//! Recursively gather device-level metadata (as JSON) from a video and, for filters, its inputs.
inline
picojson::value GetVideoDeviceProperties(VideoInterface* video)
{
    VideoPropertiesInterface* pi = dynamic_cast<VideoPropertiesInterface*>(video);
    VideoFilterInterface* fi = dynamic_cast<VideoFilterInterface*>(video);

    if(pi) {
        return pi->DeviceProperties();
    }else if(fi){
        if(fi->InputStreams().size() == 1) {
            return GetVideoDeviceProperties(fi->InputStreams()[0]);
        }else if(fi->InputStreams().size() > 0){
            picojson::value streams;

            for(size_t i=0; i< fi->InputStreams().size(); ++i) {
                const picojson::value dev_props = GetVideoDeviceProperties(fi->InputStreams()[i]);
                if(dev_props.contains("streams")) {
                    const picojson::value& dev_streams = dev_props["streams"];
                    for(size_t j=0; j < dev_streams.size(); ++j) {
                        streams.push_back(dev_streams[j]);
                    }
                }else{
                    streams.push_back(dev_props);
                }
            }

            if(streams.size() > 1) {
                picojson::value json = streams[0];
                json["streams"] = streams;
                return json;
            }else{
                return streams[0];
            }
        }
    }
    return picojson::value();
}
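
// Device-level counterpart (sketch; same recursion, but reporting static
// device metadata such as driver or serial-number fields where available):
//
//   picojson::value dev = GetVideoDeviceProperties(video.get());
//   std::cout << dev.serialize(true) << std::endl;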

} // namespace pangolin