1 /* json.c
2
3 Copyright (c) 2003-2021 HandBrake Team
4 This file is part of the HandBrake source code
5 Homepage: <http://handbrake.fr/>.
6 It may be used under the terms of the GNU General Public License v2.
7 For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
8 */
9
10 #include <jansson.h>
11 #include "handbrake/handbrake.h"
12 #include "handbrake/hb_json.h"
13 #include "libavutil/base64.h"
14 #include "handbrake/qsv_common.h"
15
16 /**
17 * Convert an hb_state_t to a jansson dict
18 * @param state - Pointer to hb_state_t to convert
19 */
hb_state_to_dict(hb_state_t * state)20 hb_dict_t* hb_state_to_dict( hb_state_t * state)
21 {
22 const char * state_s;
23 hb_dict_t *dict = NULL;
24 json_error_t error;
25
26 switch (state->state)
27 {
28 case HB_STATE_IDLE:
29 state_s = "IDLE";
30 break;
31 case HB_STATE_SCANNING:
32 state_s = "SCANNING";
33 break;
34 case HB_STATE_SCANDONE:
35 state_s = "SCANDONE";
36 break;
37 case HB_STATE_WORKING:
38 state_s = "WORKING";
39 break;
40 case HB_STATE_PAUSED:
41 state_s = "PAUSED";
42 break;
43 case HB_STATE_SEARCHING:
44 state_s = "SEARCHING";
45 break;
46 case HB_STATE_WORKDONE:
47 state_s = "WORKDONE";
48 break;
49 case HB_STATE_MUXING:
50 state_s = "MUXING";
51 break;
52 default:
53 state_s = "UNKNOWN";
54 break;
55 }
56
57 switch (state->state)
58 {
59 case HB_STATE_IDLE:
60 dict = json_pack_ex(&error, 0, "{s:o}",
61 "State", hb_value_string(state_s));
62 break;
63 case HB_STATE_SCANNING:
64 case HB_STATE_SCANDONE:
65 dict = json_pack_ex(&error, 0,
66 "{s:o, s{s:o, s:o, s:o, s:o, s:o, s:o}}",
67 "State", hb_value_string(state_s),
68 "Scanning",
69 "SequenceID", hb_value_int(state->sequence_id),
70 "Progress", hb_value_double(state->param.scanning.progress),
71 "Preview", hb_value_int(state->param.scanning.preview_cur),
72 "PreviewCount", hb_value_int(state->param.scanning.preview_count),
73 "Title", hb_value_int(state->param.scanning.title_cur),
74 "TitleCount", hb_value_int(state->param.scanning.title_count));
75 break;
76 case HB_STATE_WORKING:
77 case HB_STATE_PAUSED:
78 case HB_STATE_SEARCHING:
79 dict = json_pack_ex(&error, 0,
80 "{s:o, s{s:o, s:o, s:o, s:o, s:o, s:o,"
81 " s:o, s:o, s:o, s:o, s:o, s:o}}",
82 "State", hb_value_string(state_s),
83 "Working",
84 "Progress", hb_value_double(state->param.working.progress),
85 "PassID", hb_value_int(state->param.working.pass_id),
86 "Pass", hb_value_int(state->param.working.pass),
87 "PassCount", hb_value_int(state->param.working.pass_count),
88 "Rate", hb_value_double(state->param.working.rate_cur),
89 "RateAvg", hb_value_double(state->param.working.rate_avg),
90 "ETASeconds", hb_value_int(state->param.working.eta_seconds),
91 "Hours", hb_value_int(state->param.working.hours),
92 "Minutes", hb_value_int(state->param.working.minutes),
93 "Paused", hb_value_int(state->param.working.paused),
94 "Seconds", hb_value_int(state->param.working.seconds),
95 "SequenceID", hb_value_int(state->sequence_id));
96 break;
97 case HB_STATE_WORKDONE:
98 dict = json_pack_ex(&error, 0,
99 "{s:o, s{s:o, s:o}}",
100 "State", hb_value_string(state_s),
101 "WorkDone",
102 "SequenceID", hb_value_int(state->sequence_id),
103 "Error", hb_value_int(state->param.working.error));
104 break;
105 case HB_STATE_MUXING:
106 dict = json_pack_ex(&error, 0,
107 "{s:o, s{s:o}}",
108 "State", hb_value_string(state_s),
109 "Muxing",
110 "Progress", hb_value_double(state->param.muxing.progress));
111 break;
112 default:
113 dict = json_pack_ex(&error, 0, "{s:o}",
114 "State", hb_value_string(state_s));
115 hb_error("hb_state_to_dict: unrecognized state %d", state->state);
116 break;
117 }
118 if (dict == NULL)
119 {
120 hb_error("hb_state_to_dict, json pack failure: %s", error.text);
121 }
122 return dict;
123 }
124
hb_version_dict()125 hb_dict_t * hb_version_dict()
126 {
127 hb_dict_t * dict;
128 json_error_t error;
129
130 dict = json_pack_ex(&error, 0,
131 "{s:o, s:o, s:o, s{s:o, s:o, s:o}, s:o, s:o, s:o, s:o, s:o}",
132 "Name", hb_value_string(HB_PROJECT_NAME),
133 "Official", hb_value_bool(HB_PROJECT_REPO_OFFICIAL),
134 "Type", hb_value_string(HB_PROJECT_REPO_TYPE),
135 "Version",
136 "Major", hb_value_int(HB_PROJECT_VERSION_MAJOR),
137 "Minor", hb_value_int(HB_PROJECT_VERSION_MINOR),
138 "Point", hb_value_int(HB_PROJECT_VERSION_POINT),
139 "VersionString", hb_value_string(HB_PROJECT_VERSION),
140 "RepoHash", hb_value_string(HB_PROJECT_REPO_HASH),
141 "RepoDate", hb_value_string(HB_PROJECT_REPO_DATE),
142 "System", hb_value_string(HB_PROJECT_HOST_SYSTEMF),
143 "Arch", hb_value_string(HB_PROJECT_HOST_ARCH));
144 if (dict == NULL)
145 {
146 hb_error("hb_version_dict, json pack failure: %s", error.text);
147 return NULL;
148 }
149
150 return dict;
151 }
152
153 /**
154 * Get the current state of an hb instance as a json string
155 * @param h - Pointer to an hb_handle_t hb instance
156 */
char* hb_get_state_json( hb_handle_t * h )
{
    hb_state_t   state;
    hb_dict_t  * state_dict;
    char       * result;

    // Snapshot the current state, serialize it, then drop the dict.
    hb_get_state(h, &state);
    state_dict = hb_state_to_dict(&state);
    result     = hb_value_get_json(state_dict);
    hb_value_free(&state_dict);

    return result;
}
169
hb_audio_attributes_to_dict(uint32_t attributes)170 hb_dict_t * hb_audio_attributes_to_dict(uint32_t attributes)
171 {
172 json_error_t error;
173 hb_dict_t * dict;
174
175 dict = json_pack_ex(&error, 0,
176 "{s:o, s:o, s:o, s:o, s:o, s:o}",
177 "Normal", hb_value_bool(attributes & HB_AUDIO_ATTR_NORMAL),
178 "VisuallyImpaired", hb_value_bool(attributes &
179 HB_AUDIO_ATTR_VISUALLY_IMPAIRED),
180 "Commentary", hb_value_bool(attributes &
181 HB_AUDIO_ATTR_COMMENTARY),
182 "AltCommentary", hb_value_bool(attributes &
183 HB_AUDIO_ATTR_ALT_COMMENTARY),
184 "Secondary", hb_value_bool(attributes & HB_AUDIO_ATTR_SECONDARY),
185 "Default", hb_value_bool(attributes & HB_AUDIO_ATTR_DEFAULT));
186 if (dict == NULL)
187 {
188 hb_error("hb_audio_attributes_to_dict, json pack failure: %s", error.text);
189 }
190 return dict;
191 }
192
hb_subtitle_attributes_to_dict(uint32_t attributes)193 hb_dict_t * hb_subtitle_attributes_to_dict(uint32_t attributes)
194 {
195 json_error_t error;
196 hb_dict_t * dict;
197
198 dict = json_pack_ex(&error, 0,
199 "{s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o}",
200 "Normal", hb_value_bool(attributes & HB_SUBTITLE_ATTR_NORMAL),
201 "Large", hb_value_bool(attributes & HB_SUBTITLE_ATTR_LARGE),
202 "Children", hb_value_bool(attributes & HB_SUBTITLE_ATTR_CHILDREN),
203 "ClosedCaption", hb_value_bool(attributes & HB_SUBTITLE_ATTR_CC),
204 "Forced", hb_value_bool(attributes & HB_SUBTITLE_ATTR_FORCED),
205 "Commentary", hb_value_bool(attributes &
206 HB_SUBTITLE_ATTR_COMMENTARY),
207 "4By3", hb_value_bool(attributes & HB_SUBTITLE_ATTR_4_3),
208 "Wide", hb_value_bool(attributes & HB_SUBTITLE_ATTR_WIDE),
209 "Letterbox", hb_value_bool(attributes & HB_SUBTITLE_ATTR_LETTERBOX),
210 "PanScan", hb_value_bool(attributes & HB_SUBTITLE_ATTR_PANSCAN),
211 "Default", hb_value_bool(attributes & HB_SUBTITLE_ATTR_DEFAULT));
212 if (dict == NULL)
213 {
214 hb_error("hb_subtitle_attributes_to_dict, json pack failure: %s", error.text);
215 }
216 return dict;
217 }
218
hb_title_to_dict_internal(hb_title_t * title)219 static hb_dict_t* hb_title_to_dict_internal( hb_title_t *title )
220 {
221 hb_dict_t *dict;
222 json_error_t error;
223 int ii;
224
225 if (title == NULL)
226 return NULL;
227
228 dict = json_pack_ex(&error, 0,
229 "{"
230 // Type, Path, Name, Index, Playlist, AngleCount
231 "s:o, s:o, s:o, s:o, s:o, s:o,"
232 // Duration {Ticks, Hours, Minutes, Seconds}
233 "s:{s:o, s:o, s:o, s:o},"
234 // Geometry {Width, Height, PAR {Num, Den},
235 "s:{s:o, s:o, s:{s:o, s:o}},"
236 // Crop[Top, Bottom, Left, Right]}
237 "s:[oooo],"
238 // Color {Format, Range, Primary, Transfer, Matrix}
239 "s:{s:o, s:o, s:o, s:o, s:o},"
240 // FrameRate {Num, Den}
241 "s:{s:o, s:o},"
242 // InterlaceDetected, VideoCodec
243 "s:o, s:o,"
244 // Metadata
245 "s:o"
246 "}",
247 "Type", hb_value_int(title->type),
248 "Path", hb_value_string(title->path),
249 "Name", hb_value_string(title->name),
250 "Index", hb_value_int(title->index),
251 "Playlist", hb_value_int(title->playlist),
252 "AngleCount", hb_value_int(title->angle_count),
253 "Duration",
254 "Ticks", hb_value_int(title->duration),
255 "Hours", hb_value_int(title->hours),
256 "Minutes", hb_value_int(title->minutes),
257 "Seconds", hb_value_int(title->seconds),
258 "Geometry",
259 "Width", hb_value_int(title->geometry.width),
260 "Height", hb_value_int(title->geometry.height),
261 "PAR",
262 "Num", hb_value_int(title->geometry.par.num),
263 "Den", hb_value_int(title->geometry.par.den),
264 "Crop", hb_value_int(title->crop[0]),
265 hb_value_int(title->crop[1]),
266 hb_value_int(title->crop[2]),
267 hb_value_int(title->crop[3]),
268 "Color",
269 "Format", hb_value_int(title->pix_fmt),
270 "Range", hb_value_int(title->color_range),
271 "Primary", hb_value_int(title->color_prim),
272 "Transfer", hb_value_int(title->color_transfer),
273 "Matrix", hb_value_int(title->color_matrix),
274 "FrameRate",
275 "Num", hb_value_int(title->vrate.num),
276 "Den", hb_value_int(title->vrate.den),
277 "InterlaceDetected", hb_value_bool(title->detected_interlacing),
278 "VideoCodec", hb_value_string(title->video_codec_name),
279 "Metadata", hb_value_dup(title->metadata->dict)
280 );
281 if (dict == NULL)
282 {
283 hb_error("hb_title_to_dict_internal, json pack failure: %s", error.text);
284 return NULL;
285 }
286
287 if (title->container_name != NULL)
288 {
289 hb_dict_set(dict, "Container", hb_value_string(title->container_name));
290 }
291
292 // process chapter list
293 hb_dict_t * chapter_list = hb_value_array_init();
294 for (ii = 0; ii < hb_list_count(title->list_chapter); ii++)
295 {
296 hb_dict_t *chapter_dict;
297 char *name = "";
298 hb_chapter_t *chapter = hb_list_item(title->list_chapter, ii);
299 if (chapter->title != NULL)
300 name = chapter->title;
301
302 chapter_dict = json_pack_ex(&error, 0,
303 "{s:o, s:{s:o, s:o, s:o, s:o}}",
304 "Name", hb_value_string(name),
305 "Duration",
306 "Ticks", hb_value_int(chapter->duration),
307 "Hours", hb_value_int(chapter->hours),
308 "Minutes", hb_value_int(chapter->minutes),
309 "Seconds", hb_value_int(chapter->seconds)
310 );
311 if (chapter_dict == NULL)
312 {
313 hb_error("hb_title_to_dict_internal, chapter, json pack failure: %s", error.text);
314 return NULL;
315 }
316 hb_value_array_append(chapter_list, chapter_dict);
317 }
318 hb_dict_set(dict, "ChapterList", chapter_list);
319
320 // process audio list
321 hb_dict_t * audio_list = hb_value_array_init();
322 for (ii = 0; ii < hb_list_count(title->list_audio); ii++)
323 {
324 const char * codec_name;
325 char channel_layout_name[64];
326 int channel_count, lfe_count;
327 hb_dict_t * audio_dict, * attributes;
328 hb_audio_t * audio = hb_list_item(title->list_audio, ii);
329
330 codec_name = hb_audio_decoder_get_name(audio->config.in.codec,
331 audio->config.in.codec_param);
332 hb_layout_get_name(channel_layout_name, sizeof(channel_layout_name),
333 audio->config.in.channel_layout);
334 channel_count = hb_layout_get_discrete_channel_count(
335 audio->config.in.channel_layout);
336 lfe_count = hb_layout_get_low_freq_channel_count(
337 audio->config.in.channel_layout);
338
339
340 attributes = hb_audio_attributes_to_dict(audio->config.lang.attributes);
341 audio_dict = json_pack_ex(&error, 0,
342 "{s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o}",
343 "TrackNumber", hb_value_int(ii + 1),
344 "Description", hb_value_string(audio->config.lang.description),
345 "Language", hb_value_string(audio->config.lang.simple),
346 "LanguageCode", hb_value_string(audio->config.lang.iso639_2),
347 "Attributes", attributes,
348 "Codec", hb_value_int(audio->config.in.codec),
349 "CodecParam", hb_value_int(audio->config.in.codec_param),
350 "CodecName", hb_value_string(codec_name),
351 "SampleRate", hb_value_int(audio->config.in.samplerate),
352 "BitRate", hb_value_int(audio->config.in.bitrate),
353 "ChannelLayout", hb_value_int(audio->config.in.channel_layout),
354 "ChannelLayoutName", hb_value_string(channel_layout_name),
355 "ChannelCount", hb_value_int(channel_count),
356 "LFECount", hb_value_int(lfe_count));
357 if (audio_dict == NULL)
358 {
359 hb_error("hb_title_to_dict_internal, audio, json pack failure: %s", error.text);
360 return NULL;
361 }
362 if (audio->config.in.name != NULL)
363 {
364 hb_dict_set_string(audio_dict, "Name", audio->config.in.name);
365 }
366 hb_value_array_append(audio_list, audio_dict);
367 }
368 hb_dict_set(dict, "AudioList", audio_list);
369
370 // process subtitle list
371 hb_value_array_t * subtitle_list = hb_value_array_init();
372 for (ii = 0; ii < hb_list_count(title->list_subtitle); ii++)
373 {
374 const char * format;
375 hb_dict_t * subtitle_dict, * attributes;
376 hb_subtitle_t * subtitle = hb_list_item(title->list_subtitle, ii);
377
378 format = subtitle->format == PICTURESUB ? "bitmap" : "text";
379 attributes = hb_subtitle_attributes_to_dict(subtitle->attributes);
380 subtitle_dict = json_pack_ex(&error, 0,
381 "{s:o, s:o, s:o, s:o, s:o, s:o, s:o}",
382 "TrackNumber", hb_value_int(ii + 1),
383 "Format", hb_value_string(format),
384 "Source", hb_value_int(subtitle->source),
385 "SourceName", hb_value_string(hb_subsource_name(subtitle->source)),
386 "Attributes", attributes,
387 "Language", hb_value_string(subtitle->lang),
388 "LanguageCode", hb_value_string(subtitle->iso639_2));
389 if (subtitle_dict == NULL)
390 {
391 hb_error("hb_title_to_dict_internal, subtitle, json pack failure: %s", error.text);
392 return NULL;
393 }
394 if (subtitle->name != NULL)
395 {
396 hb_dict_set_string(subtitle_dict, "Name", subtitle->name);
397 }
398 hb_value_array_append(subtitle_list, subtitle_dict);
399 }
400 hb_dict_set(dict, "SubtitleList", subtitle_list);
401
402 return dict;
403 }
404
405 /**
406 * Convert an hb_title_t to a jansson dict
407 * @param title - Pointer to the hb_title_t to convert
408 */
hb_title_to_dict(hb_handle_t * h,int title_index)409 hb_dict_t* hb_title_to_dict( hb_handle_t *h, int title_index )
410 {
411 hb_title_t *title = hb_find_title_by_index(h, title_index);
412 return hb_title_to_dict_internal(title);
413 }
414
415 /**
416 * Convert an hb_title_set_t to a jansson dict
417 * @param title - Pointer to the hb_title_set_t to convert
418 */
hb_title_set_to_dict(const hb_title_set_t * title_set)419 hb_dict_t* hb_title_set_to_dict( const hb_title_set_t * title_set )
420 {
421 hb_dict_t *dict;
422 json_error_t error;
423 int ii;
424
425 dict = json_pack_ex(&error, 0,
426 "{s:o, s:[]}",
427 "MainFeature", hb_value_int(title_set->feature),
428 "TitleList");
429 // process title list
430 hb_dict_t *title_list = hb_dict_get(dict, "TitleList");
431 for (ii = 0; ii < hb_list_count(title_set->list_title); ii++)
432 {
433 hb_title_t *title = hb_list_item(title_set->list_title, ii);
434 hb_dict_t *title_dict = hb_title_to_dict_internal(title);
435 hb_value_array_append(title_list, title_dict);
436 }
437
438 return dict;
439 }
440
441 /**
442 * Convert an hb_title_t to a json string
443 * @param title - Pointer to hb_title_t to convert
444 */
char* hb_title_to_json( hb_handle_t *h, int title_index )
{
    char      * result     = NULL;
    hb_dict_t * title_dict = hb_title_to_dict(h, title_index);

    // A missing title yields NULL, matching the dict conversion.
    if (title_dict != NULL)
    {
        result = hb_value_get_json(title_dict);
        hb_value_free(&title_dict);
    }
    return result;
}
456
457 /**
458 * Get the current title set of an hb instance as a json string
459 * @param h - Pointer to hb_handle_t hb instance
460 */
char* hb_get_title_set_json( hb_handle_t * h )
{
    char      * result;
    hb_dict_t * set_dict;

    // Serialize the instance's full title set, then drop the dict.
    set_dict = hb_title_set_to_dict(hb_get_title_set(h));
    result   = hb_value_get_json(set_dict);
    hb_value_free(&set_dict);

    return result;
}
470
471 /**
472 * Convert an hb_job_t to an hb_dict_t
473 * @param job - Pointer to the hb_job_t to convert
474 */
hb_job_to_dict(const hb_job_t * job)475 hb_dict_t* hb_job_to_dict( const hb_job_t * job )
476 {
477 hb_dict_t * dict;
478 json_error_t error;
479 int subtitle_search_burn;
480 int ii;
481 int adapter_index;
482
483 #if HB_PROJECT_FEATURE_QSV
484 if (job->qsv.ctx){
485 adapter_index = job->qsv.ctx->dx_index;
486 }
487
488 #else
489 adapter_index = 0;
490 #endif
491
492 if (job == NULL || job->title == NULL)
493 return NULL;
494
495 // Assumes that the UI has reduced geometry settings to only the
496 // necessary PAR value
497
498 subtitle_search_burn = job->select_subtitle_config.dest == RENDERSUB;
499 dict = json_pack_ex(&error, 0,
500 "{"
501 // SequenceID
502 "s:o,"
503 // Destination {Mux, InlineParameterSets, AlignAVStart,
504 // ChapterMarkers, ChapterList}
505 "s:{s:o, s:o, s:o, s:o, s:[]},"
506 // Source {Path, Title, Angle}
507 "s:{s:o, s:o, s:o,},"
508 // PAR {Num, Den}
509 "s:{s:o, s:o},"
510 // Video {Encoder, QSV {Decode, AsyncDepth, AdapterIndex}}
511 "s:{s:o, s:{s:o, s:o, s:o}},"
512 // Audio {CopyMask, FallbackEncoder, AudioList []}
513 "s:{s:[], s:o, s:[]},"
514 // Subtitles {Search {Enable, Forced, Default, Burn}, SubtitleList []}
515 "s:{s:{s:o, s:o, s:o, s:o}, s:[]},"
516 // Metadata
517 "s:o,"
518 // Filters {FilterList []}
519 "s:{s:[]}"
520 "}",
521 "SequenceID", hb_value_int(job->sequence_id),
522 "Destination",
523 "Mux", hb_value_int(job->mux),
524 "InlineParameterSets", hb_value_bool(job->inline_parameter_sets),
525 "AlignAVStart", hb_value_bool(job->align_av_start),
526 "ChapterMarkers", hb_value_bool(job->chapter_markers),
527 "ChapterList",
528 "Source",
529 "Path", hb_value_string(job->title->path),
530 "Title", hb_value_int(job->title->index),
531 "Angle", hb_value_int(job->angle),
532 "PAR",
533 "Num", hb_value_int(job->par.num),
534 "Den", hb_value_int(job->par.den),
535 "Video",
536 "Encoder", hb_value_int(job->vcodec),
537 "QSV",
538 "Decode", hb_value_bool(job->qsv.decode),
539 "AsyncDepth", hb_value_int(job->qsv.async_depth),
540 "AdapterIndex", hb_value_int(adapter_index),
541 "Audio",
542 "CopyMask",
543 "FallbackEncoder", hb_value_int(job->acodec_fallback),
544 "AudioList",
545 "Subtitle",
546 "Search",
547 "Enable", hb_value_bool(job->indepth_scan),
548 "Forced", hb_value_bool(job->select_subtitle_config.force),
549 "Default", hb_value_bool(job->select_subtitle_config.default_track),
550 "Burn", hb_value_bool(subtitle_search_burn),
551 "SubtitleList",
552 "Metadata", hb_value_dup(job->metadata->dict),
553 "Filters",
554 "FilterList"
555 );
556 if (dict == NULL)
557 {
558 hb_error("hb_job_to_dict, json pack failure: %s", error.text);
559 return NULL;
560 }
561 hb_dict_t *dest_dict = hb_dict_get(dict, "Destination");
562 if (job->file != NULL)
563 {
564 hb_dict_set(dest_dict, "File", hb_value_string(job->file));
565 }
566 if (job->mux & HB_MUX_MASK_MP4)
567 {
568 hb_dict_t *mp4_dict;
569 mp4_dict = json_pack_ex(&error, 0, "{s:o, s:o}",
570 "Mp4Optimize", hb_value_bool(job->mp4_optimize),
571 "IpodAtom", hb_value_bool(job->ipod_atom));
572 hb_dict_set(dest_dict, "Mp4Options", mp4_dict);
573 }
574 hb_dict_t *source_dict = hb_dict_get(dict, "Source");
575 hb_dict_t *range_dict;
576 if (job->start_at_preview > 0)
577 {
578 range_dict = json_pack_ex(&error, 0, "{s:o, s:o, s:o, s:o}",
579 "Type", hb_value_string("preview"),
580 "Start", hb_value_int(job->start_at_preview),
581 "End", hb_value_int(job->pts_to_stop),
582 "SeekPoints", hb_value_int(job->seek_points));
583 }
584 else if (job->pts_to_start != 0 || job->pts_to_stop != 0)
585 {
586 range_dict = hb_dict_init();
587 hb_dict_set(range_dict, "Type", hb_value_string("time"));
588 if (job->pts_to_start > 0)
589 {
590 hb_dict_set(range_dict, "Start", hb_value_int(job->pts_to_start));
591 }
592 if (job->pts_to_stop > 0)
593 {
594 hb_dict_set(range_dict, "End",
595 hb_value_int(job->pts_to_start + job->pts_to_stop));
596 }
597 }
598 else if (job->frame_to_start != 0 || job->frame_to_stop != 0)
599 {
600 range_dict = hb_dict_init();
601 hb_dict_set(range_dict, "Type", hb_value_string("frame"));
602 if (job->frame_to_start > 0)
603 {
604 hb_dict_set(range_dict, "Start",
605 hb_value_int(job->frame_to_start + 1));
606 }
607 if (job->frame_to_stop > 0)
608 {
609 hb_dict_set(range_dict, "End",
610 hb_value_int(job->frame_to_start + job->frame_to_stop));
611 }
612 }
613 else
614 {
615 range_dict = json_pack_ex(&error, 0, "{s:o, s:o, s:o}",
616 "Type", hb_value_string("chapter"),
617 "Start", hb_value_int(job->chapter_start),
618 "End", hb_value_int(job->chapter_end));
619 }
620 hb_dict_set(source_dict, "Range", range_dict);
621
622 hb_dict_t *video_dict = hb_dict_get(dict, "Video");
623 hb_dict_set(video_dict, "ColorInputFormat",
624 hb_value_int(job->input_pix_fmt));
625 hb_dict_set(video_dict, "ColorOutputFormat",
626 hb_value_int(job->output_pix_fmt));
627 hb_dict_set(video_dict, "ColorRange",
628 hb_value_int(job->color_range));
629 hb_dict_set(video_dict, "ColorPrimaries",
630 hb_value_int(job->color_prim));
631 hb_dict_set(video_dict, "ColorTransfer",
632 hb_value_int(job->color_transfer));
633 hb_dict_set(video_dict, "ColorMatrix",
634 hb_value_int(job->color_matrix));
635 if (job->color_prim_override != HB_COLR_PRI_UNDEF)
636 {
637 hb_dict_set(video_dict, "ColorPrimariesOverride",
638 hb_value_int(job->color_prim_override));
639 }
640 if (job->color_transfer_override != HB_COLR_TRA_UNDEF)
641 {
642 hb_dict_set(video_dict, "ColorTransferOverride",
643 hb_value_int(job->color_transfer_override));
644 }
645 if (job->color_matrix_override != HB_COLR_MAT_UNDEF)
646 {
647 hb_dict_set(video_dict, "ColorMatrixOverride",
648 hb_value_int(job->color_matrix_override));
649 }
650
651 // Mastering metadata
652 hb_dict_t *mastering_dict;
653 if (job->mastering.has_primaries || job->mastering.has_luminance)
654 {
655 mastering_dict = json_pack_ex(&error, 0,
656 "{"
657 // DisplayPrimaries[3][2]
658 "s:[[[ii],[ii]],[[ii],[ii]],[[ii],[ii]]],"
659 // WhitePoint[2],
660 "s:[[i,i],[i,i]],"
661 // MinLuminance, MaxLuminance, HasPrimaries, HasLuminance
662 "s:[i,i],s:[i,i],s:b,s:b"
663 "}",
664 "DisplayPrimaries", job->mastering.display_primaries[0][0].num,
665 job->mastering.display_primaries[0][0].den,
666 job->mastering.display_primaries[0][1].num,
667 job->mastering.display_primaries[0][1].den,
668 job->mastering.display_primaries[1][0].num,
669 job->mastering.display_primaries[1][0].den,
670 job->mastering.display_primaries[1][1].num,
671 job->mastering.display_primaries[1][1].den,
672 job->mastering.display_primaries[2][0].num,
673 job->mastering.display_primaries[2][0].den,
674 job->mastering.display_primaries[2][1].num,
675 job->mastering.display_primaries[2][1].den,
676 "WhitePoint", job->mastering.white_point[0].num,
677 job->mastering.white_point[0].den,
678 job->mastering.white_point[1].num,
679 job->mastering.white_point[1].den,
680 "MinLuminance", job->mastering.min_luminance.num,
681 job->mastering.min_luminance.den,
682 "MaxLuminance", job->mastering.max_luminance.num,
683 job->mastering.max_luminance.den,
684 "HasPrimaries", job->mastering.has_primaries,
685 "HasLuminance", job->mastering.has_luminance
686 );
687 hb_dict_set(video_dict, "Mastering", mastering_dict);
688 }
689
690 // Content Light Level metadata
691 hb_dict_t *coll_dict;
692 if (job->coll.max_cll && job->coll.max_fall)
693 {
694 coll_dict = json_pack_ex(&error, 0, "{s:i, s:i}",
695 "MaxCLL", job->coll.max_cll,
696 "MaxFALL", job->coll.max_fall);
697 hb_dict_set(video_dict, "ContentLightLevel", coll_dict);
698 }
699
700 if (job->vquality > HB_INVALID_VIDEO_QUALITY)
701 {
702 hb_dict_set(video_dict, "Quality", hb_value_double(job->vquality));
703 }
704 else
705 {
706 hb_dict_set(video_dict, "Bitrate", hb_value_int(job->vbitrate));
707 hb_dict_set(video_dict, "TwoPass", hb_value_bool(job->twopass));
708 hb_dict_set(video_dict, "Turbo",
709 hb_value_bool(job->fastfirstpass));
710 }
711 if (job->encoder_preset != NULL)
712 {
713 hb_dict_set(video_dict, "Preset",
714 hb_value_string(job->encoder_preset));
715 }
716 if (job->encoder_tune != NULL)
717 {
718 hb_dict_set(video_dict, "Tune", hb_value_string(job->encoder_tune));
719 }
720 if (job->encoder_profile != NULL)
721 {
722 hb_dict_set(video_dict, "Profile",
723 hb_value_string(job->encoder_profile));
724 }
725 if (job->encoder_level != NULL)
726 {
727 hb_dict_set(video_dict, "Level", hb_value_string(job->encoder_level));
728 }
729 if (job->encoder_options != NULL)
730 {
731 hb_dict_set(video_dict, "Options",
732 hb_value_string(job->encoder_options));
733 }
734
735 // process chapter list
736 hb_dict_t *chapter_list = hb_dict_get(dest_dict, "ChapterList");
737 for (ii = 0; ii < hb_list_count(job->list_chapter); ii++)
738 {
739 hb_dict_t *chapter_dict;
740 char *name = "";
741 hb_chapter_t *chapter = hb_list_item(job->list_chapter, ii);
742 if (chapter->title != NULL)
743 name = chapter->title;
744
745 chapter_dict = json_pack_ex(&error, 0,
746 "{s:o, s:{s:o, s:o, s:o, s:o}}",
747 "Name", hb_value_string(name),
748 "Duration",
749 "Ticks", hb_value_int(chapter->duration),
750 "Hours", hb_value_int(chapter->hours),
751 "Minutes", hb_value_int(chapter->minutes),
752 "Seconds", hb_value_int(chapter->seconds)
753 );
754 hb_value_array_append(chapter_list, chapter_dict);
755 }
756
757 // process filter list
758 hb_dict_t *filters_dict = hb_dict_get(dict, "Filters");
759 hb_value_array_t *filter_list = hb_dict_get(filters_dict, "FilterList");
760 for (ii = 0; ii < hb_list_count(job->list_filter); ii++)
761 {
762 hb_dict_t *filter_dict;
763 hb_filter_object_t *filter = hb_list_item(job->list_filter, ii);
764
765 filter_dict = json_pack_ex(&error, 0, "{s:o}",
766 "ID", hb_value_int(filter->id));
767 if (filter->settings != NULL)
768 {
769 hb_dict_set(filter_dict, "Settings",
770 hb_value_dup(filter->settings));
771 }
772
773 hb_value_array_append(filter_list, filter_dict);
774 }
775
776 hb_dict_t *audios_dict = hb_dict_get(dict, "Audio");
777 // Construct audio CopyMask
778 hb_value_array_t *copy_mask = hb_dict_get(audios_dict, "CopyMask");
779 int acodec;
780 for (acodec = 1; acodec != HB_ACODEC_PASS_FLAG; acodec <<= 1)
781 {
782 if (acodec & job->acodec_copy_mask)
783 {
784 const char *name;
785 name = hb_audio_encoder_get_name(acodec | HB_ACODEC_PASS_FLAG);
786 if (name != NULL)
787 {
788 hb_value_t *val = hb_value_string(name);
789 hb_value_array_append(copy_mask, val);
790 }
791 }
792 }
793 // process audio list
794 hb_dict_t *audio_list = hb_dict_get(audios_dict, "AudioList");
795 for (ii = 0; ii < hb_list_count(job->list_audio); ii++)
796 {
797 hb_dict_t *audio_dict;
798 hb_audio_t *audio = hb_list_item(job->list_audio, ii);
799
800 audio_dict = json_pack_ex(&error, 0,
801 "{s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o}",
802 "Track", hb_value_int(audio->config.in.track),
803 "Encoder", hb_value_int(audio->config.out.codec),
804 "Gain", hb_value_double(audio->config.out.gain),
805 "DRC", hb_value_double(audio->config.out.dynamic_range_compression),
806 "Mixdown", hb_value_int(audio->config.out.mixdown),
807 "NormalizeMixLevel", hb_value_bool(audio->config.out.normalize_mix_level),
808 "DitherMethod", hb_value_int(audio->config.out.dither_method),
809 "Samplerate", hb_value_int(audio->config.out.samplerate),
810 "Bitrate", hb_value_int(audio->config.out.bitrate),
811 "Quality", hb_value_double(audio->config.out.quality),
812 "CompressionLevel", hb_value_double(audio->config.out.compression_level));
813 if (audio->config.out.name != NULL)
814 {
815 hb_dict_set_string(audio_dict, "Name", audio->config.out.name);
816 }
817
818 hb_value_array_append(audio_list, audio_dict);
819 }
820
821 // process subtitle list
822 hb_dict_t *subtitles_dict = hb_dict_get(dict, "Subtitle");
823 hb_dict_t *subtitle_list = hb_dict_get(subtitles_dict, "SubtitleList");
824 for (ii = 0; ii < hb_list_count(job->list_subtitle); ii++)
825 {
826 hb_dict_t *subtitle_dict;
827 hb_subtitle_t *subtitle = hb_list_item(job->list_subtitle, ii);
828
829 if (subtitle->source == IMPORTSRT ||
830 subtitle->source == IMPORTSSA)
831 {
832 subtitle_dict = json_pack_ex(&error, 0,
833 "{s:o, s:o, s:o, s:{s:o, s:o, s:o}}",
834 "Default", hb_value_bool(subtitle->config.default_track),
835 "Burn", hb_value_bool(subtitle->config.dest == RENDERSUB),
836 "Offset", hb_value_int(subtitle->config.offset),
837 "Import",
838 "Format", hb_value_string(subtitle->source == IMPORTSRT ?
839 "SRT" : "SSA"),
840 "Filename", hb_value_string(subtitle->config.src_filename),
841 "Language", hb_value_string(subtitle->iso639_2));
842 if (subtitle->source == IMPORTSRT)
843 {
844 hb_dict_t *import_dict = hb_dict_get(subtitle_dict, "Import");
845 hb_dict_set(import_dict, "Codeset",
846 hb_value_string(subtitle->config.src_codeset));
847 }
848 }
849 else
850 {
851 subtitle_dict = json_pack_ex(&error, 0,
852 "{s:o, s:o, s:o, s:o, s:o}",
853 "Track", hb_value_int(subtitle->track),
854 "Default", hb_value_bool(subtitle->config.default_track),
855 "Forced", hb_value_bool(subtitle->config.force),
856 "Burn", hb_value_bool(subtitle->config.dest == RENDERSUB),
857 "Offset", hb_value_int(subtitle->config.offset));
858 }
859 if (subtitle->config.name != NULL)
860 {
861 hb_dict_set_string(subtitle_dict, "Name", subtitle->config.name);
862 }
863 hb_value_array_append(subtitle_list, subtitle_dict);
864 }
865
866 return dict;
867 }
868
869 /**
870 * Convert an hb_job_t to a json string
871 * @param job - Pointer to the hb_job_t to convert
872 */
hb_job_to_json(const hb_job_t * job)873 char* hb_job_to_json( const hb_job_t * job )
874 {
875 hb_dict_t *dict = hb_job_to_dict(job);
876
877 if (dict == NULL)
878 return NULL;
879
880 char *json_job = hb_value_get_json(dict);
881 hb_value_free(&dict);
882
883 return json_job;
884 }
885
// These identity functions exist only to perform compile-time type checking
// when using json_unpack_ex(): each matches one jansson format character
// (f, i, u, I, b, s, o) and simply returns its argument, so passing a
// mistyped pointer becomes a compiler error instead of runtime corruption.
typedef const char * const_str_t;

static double* unpack_f(double *f) { return f; }
static int* unpack_i(int *i) { return i; }
static unsigned* unpack_u(unsigned *u) { return u; }
static json_int_t* unpack_I(json_int_t *i) { return i; }
static int * unpack_b(int *b) { return b; }
static const_str_t* unpack_s(const_str_t *s){ return s; }
static json_t** unpack_o(json_t** o) { return o; }
897
void hb_json_job_scan( hb_handle_t * h, const char * json_job )
{
    json_error_t error;
    int          title_index;
    const char * path     = NULL;
    hb_dict_t  * job_dict = hb_value_json(json_job);

    // Pull the source path and title index out of the job description.
    int result = json_unpack_ex(job_dict, &error, 0, "{s:{s:s, s:i}}",
                                "Source",
                                    "Path",  unpack_s(&path),
                                    "Title", unpack_i(&title_index));
    if (result < 0)
    {
        hb_error("json unpack failure, failed to find title: %s", error.text);
        hb_value_free(&job_dict);
        return;
    }

    // If the job wants to use Hardware decode, it must also be
    // enabled during scan. So enable it here.
    hb_scan(h, path, title_index, -1, 0, 0);

    // Wait for scan to complete
    hb_state_t status;
    hb_get_state2(h, &status);
    while (status.state == HB_STATE_SCANNING)
    {
        hb_snooze(50);
        hb_get_state2(h, &status);
    }
    hb_value_free(&job_dict);
}
935
validate_audio_codec_mux(int codec,int mux,int track)936 static int validate_audio_codec_mux(int codec, int mux, int track)
937 {
938 const hb_encoder_t *enc = NULL;
939 while ((enc = hb_audio_encoder_get_next(enc)) != NULL)
940 {
941 if ((enc->codec == codec) && (enc->muxers & mux) == 0)
942 {
943 if (codec != HB_ACODEC_NONE)
944 {
945 hb_error("track %d: incompatible encoder '%s' for muxer '%s'",
946 track + 1, enc->short_name,
947 hb_container_get_short_name(mux));
948 }
949 return -1;
950 }
951 }
952 return 0;
953 }
954
955 /**
956 * Convert a json string representation of a job to an hb_job_t
957 * @param h - Pointer to the hb_handle_t hb instance which contains the
958 * title that the job refers to.
959 * @param json_job - Pointer to json string representation of a job
960 */
hb_job_t* hb_dict_to_job( hb_handle_t * h, hb_dict_t *dict )
{
    hb_job_t * job;
    int result;
    json_error_t error;
    int titleindex;

    if (dict == NULL)
        return NULL;

    // A job dict must at minimum identify the source title index.
    result = json_unpack_ex(dict, &error, 0, "{s:{s:i}}",
                            "Source", "Title", unpack_i(&titleindex));
    if (result < 0)
    {
        hb_error("hb_dict_to_job: failed to find title: %s", error.text);
        return NULL;
    }

    // Start from the default job settings for this title; the fields
    // unpacked below overwrite those defaults.
    job = hb_job_init_by_index(h, titleindex);
    if (job == NULL)
    {
        hb_error("hb_dict_to_job: Title %d doesn't exist", titleindex);
        return NULL;
    }

    // Destinations for the values unpacked below.  All json_t/hb_value_t
    // pointers obtained with the "o" conversion are borrowed references
    // into 'dict': they must not be freed here and do not outlive it.
    hb_value_array_t * chapter_list = NULL;
    hb_value_array_t * audio_list = NULL;
    hb_value_array_t * subtitle_list = NULL;
    hb_value_array_t * filter_list = NULL;
    hb_value_t       * mux = NULL, * vcodec = NULL;
    hb_dict_t        * mastering_dict = NULL;
    hb_dict_t        * coll_dict = NULL;
    hb_value_t       * acodec_copy_mask = NULL, * acodec_fallback = NULL;
    const char       * destfile = NULL;
    const char       * range_type = NULL;
    const char       * video_preset = NULL, * video_tune = NULL;
    const char       * video_profile = NULL, * video_level = NULL;
    const char       * video_options = NULL;
    int                subtitle_search_burn = 0;
    json_int_t         range_start = -1, range_end = -1, range_seek_points = -1;
    int                vbitrate = -1;
    double             vquality = HB_INVALID_VIDEO_QUALITY;
    int                adapter_index = -1;
    hb_dict_t        * meta_dict = NULL;

    // NOTE: the variadic arguments below must stay in exact lock-step with
    // the format string; jansson matches them positionally ("?" marks an
    // optional key, which leaves the destination untouched when absent).
    result = json_unpack_ex(dict, &error, 0,
    "{"
    // SequenceID
    "s:i,"
    // Destination {File, Mux, InlineParameterSets, AlignAVStart,
    //              ChapterMarkers, ChapterList,
    //              Mp4Options {Mp4Optimize, IpodAtom}}
    "s:{s?s, s:o, s?b, s?b, s:b, s?o s?{s?b, s?b}},"
    // Source {Angle, Range {Type, Start, End, SeekPoints}}
    "s:{s?i, s?{s:s, s?I, s?I, s?I}},"
    // PAR {Num, Den}
    "s?{s:i, s:i},"
    // Video {Codec, Quality, Bitrate, Preset, Tune, Profile, Level, Options
    //        TwoPass, Turbo,
    //        ColorInputFormat, ColorOutputFormat, ColorRange,
    //        ColorPrimaries, ColorTransfer, ColorMatrix,
    //        Mastering,
    //        ContentLightLevel,
    //        ColorPrimariesOverride, ColorTransferOverride, ColorMatrixOverride,
    //        QSV {Decode, AsyncDepth, AdapterIndex}}
    "s:{s:o, s?F, s?i, s?s, s?s, s?s, s?s, s?s,"
    "   s?b, s?b,"
    "   s?i, s?i, s?i,"
    "   s?i, s?i, s?i,"
    "   s?o,"
    "   s?o,"
    "   s?i, s?i, s?i,"
    "   s?{s?b, s?i, s?i}},"
    // Audio {CopyMask, FallbackEncoder, AudioList}
    "s?{s?o, s?o, s?o},"
    // Subtitle {Search {Enable, Forced, Default, Burn}, SubtitleList}
    "s?{s?{s:b, s?b, s?b, s?b}, s?o},"
    // Metadata
    "s?o,"
    // Filters {FilterList}
    "s?{s?o}"
    "}",
        "SequenceID",               unpack_i(&job->sequence_id),
        "Destination",
            "File",                 unpack_s(&destfile),
            "Mux",                  unpack_o(&mux),
            "InlineParameterSets",  unpack_b(&job->inline_parameter_sets),
            "AlignAVStart",         unpack_b(&job->align_av_start),
            "ChapterMarkers",       unpack_b(&job->chapter_markers),
            "ChapterList",          unpack_o(&chapter_list),
            "Mp4Options",
                "Mp4Optimize",      unpack_b(&job->mp4_optimize),
                "IpodAtom",         unpack_b(&job->ipod_atom),
        "Source",
            "Angle",                unpack_i(&job->angle),
            "Range",
                "Type",             unpack_s(&range_type),
                "Start",            unpack_I(&range_start),
                "End",              unpack_I(&range_end),
                "SeekPoints",       unpack_I(&range_seek_points),
        "PAR",
            "Num",                  unpack_i(&job->par.num),
            "Den",                  unpack_i(&job->par.den),
        "Video",
            "Encoder",              unpack_o(&vcodec),
            "Quality",              unpack_f(&vquality),
            "Bitrate",              unpack_i(&vbitrate),
            "Preset",               unpack_s(&video_preset),
            "Tune",                 unpack_s(&video_tune),
            "Profile",              unpack_s(&video_profile),
            "Level",                unpack_s(&video_level),
            "Options",              unpack_s(&video_options),
            "TwoPass",              unpack_b(&job->twopass),
            "Turbo",                unpack_b(&job->fastfirstpass),
            "ColorInputFormat",     unpack_i(&job->input_pix_fmt),
            "ColorOutputFormat",    unpack_i(&job->output_pix_fmt),
            "ColorRange",           unpack_i(&job->color_range),
            "ColorPrimaries",       unpack_i(&job->color_prim),
            "ColorTransfer",        unpack_i(&job->color_transfer),
            "ColorMatrix",          unpack_i(&job->color_matrix),
            "Mastering",            unpack_o(&mastering_dict),
            "ContentLightLevel",    unpack_o(&coll_dict),
            "ColorPrimariesOverride", unpack_i(&job->color_prim_override),
            "ColorTransferOverride",  unpack_i(&job->color_transfer_override),
            "ColorMatrixOverride",    unpack_i(&job->color_matrix_override),
            "QSV",
                "Decode",           unpack_b(&job->qsv.decode),
                "AsyncDepth",       unpack_i(&job->qsv.async_depth),
                "AdapterIndex",     unpack_i(&adapter_index),
        "Audio",
            "CopyMask",             unpack_o(&acodec_copy_mask),
            "FallbackEncoder",      unpack_o(&acodec_fallback),
            "AudioList",            unpack_o(&audio_list),
        "Subtitle",
            "Search",
                "Enable",           unpack_b(&job->indepth_scan),
                "Forced",           unpack_b(&job->select_subtitle_config.force),
                "Default",          unpack_b(&job->select_subtitle_config.default_track),
                "Burn",             unpack_b(&subtitle_search_burn),
            "SubtitleList",         unpack_o(&subtitle_list),
        "Metadata",                 unpack_o(&meta_dict),
        "Filters",
            "FilterList",           unpack_o(&filter_list)
    );
    if (result < 0)
    {
        hb_error("hb_dict_to_job: failed to parse dict: %s", error.text);
        goto fail;
    }
    if (meta_dict != NULL)
    {
        // Take a private copy; meta_dict is a borrowed reference.
        job->metadata->dict = hb_value_dup(meta_dict);
    }
    // Lookup mux id: "Mux" may be a container name/extension string
    // or a raw integer id.
    if (hb_value_type(mux) == HB_VALUE_TYPE_STRING)
    {
        const char *s = hb_value_get_string(mux);
        job->mux = hb_container_get_from_name(s);
        if (job->mux == 0)
            job->mux = hb_container_get_from_extension(s);
    }
    else
    {
        job->mux = hb_value_get_int(mux);
    }

    // Lookup video codec: "Encoder" may be a name string or an integer id.
    if (hb_value_type(vcodec) == HB_VALUE_TYPE_STRING)
    {
        const char *s = hb_value_get_string(vcodec);
        job->vcodec = hb_video_encoder_get_from_name(s);
    }
    else
    {
        job->vcodec = hb_value_get_int(vcodec);
    }

    // Apply the encode range.  Start/End/SeekPoints default to -1 above,
    // so only explicitly supplied values are applied.
    if (range_type != NULL)
    {
        if (!strcasecmp(range_type, "preview"))
        {
            if (range_start >= 0)
                job->start_at_preview = range_start;
            if (range_end >= 0)
                job->pts_to_stop = range_end;
            if (range_seek_points >= 0)
                job->seek_points = range_seek_points;
        }
        else if (!strcasecmp(range_type, "chapter"))
        {
            if (range_start >= 0)
                job->chapter_start = range_start;
            if (range_end >= 0)
                job->chapter_end = range_end;
        }
        else if (!strcasecmp(range_type, "time"))
        {
            if (range_start >= 0)
                job->pts_to_start = range_start;
            if (range_end >= 0)
                // End is given as an absolute timestamp; store a duration.
                job->pts_to_stop = range_end - job->pts_to_start;
        }
        else if (!strcasecmp(range_type, "frame"))
        {
            // NOTE(review): start is decremented by one here, i.e. the json
            // value appears to be 1-based while frame_to_start is 0-based
            // — confirm against the callers that build these dicts.
            if (range_start > 0)
                job->frame_to_start = range_start - 1;
            if (range_end > 0)
                job->frame_to_stop = range_end - job->frame_to_start;
        }
    }

    if (destfile != NULL && destfile[0] != 0)
    {
        hb_job_set_file(job, destfile);
    }

    // These setters accept NULL (value unset above when the key is absent).
    hb_job_set_encoder_preset(job, video_preset);
    hb_job_set_encoder_tune(job, video_tune);
    hb_job_set_encoder_profile(job, video_profile);
    hb_job_set_encoder_level(job, video_level);
    hb_job_set_encoder_options(job, video_options);

#if HB_PROJECT_FEATURE_QSV
    if (job->qsv.ctx) {
        job->qsv.ctx->dx_index = adapter_index;
    }
#endif
    // If both vbitrate and vquality were specified, vbitrate is used;
    // we need to ensure the unused rate control mode is always set to an
    // invalid value, as if both values are valid, behavior is undefined
    // (some encoders first check for a valid vquality, whereas others
    // check for a valid vbitrate instead)
    if (vbitrate > 0)
    {
        job->vbitrate = vbitrate;
        job->vquality = HB_INVALID_VIDEO_QUALITY;
    }
    else if (vquality > HB_INVALID_VIDEO_QUALITY)
    {
        job->vbitrate = -1;
        job->vquality = vquality;
    }
    // If neither were specified, defaults are used (set in job_setup())

    job->select_subtitle_config.dest = subtitle_search_burn ?
                                            RENDERSUB : PASSTHRUSUB;

    // HDR10 mastering display metadata (all keys required when present).
    if (mastering_dict != NULL)
    {
        result = json_unpack_ex(mastering_dict, &error, 0,
        "{"
        // DisplayPrimaries[3][2]
        "s:[[[ii],[ii]],[[ii],[ii]],[[ii],[ii]]],"
        // WhitePoint[2],
        "s:[[i,i],[i,i]],"
        // MinLuminance, MaxLuminance, HasPrimaries, HasLuminance
        "s:[i,i],s:[i,i],s:b,s:b"
        "}",
            "DisplayPrimaries", unpack_i(&job->mastering.display_primaries[0][0].num),
                                unpack_i(&job->mastering.display_primaries[0][0].den),
                                unpack_i(&job->mastering.display_primaries[0][1].num),
                                unpack_i(&job->mastering.display_primaries[0][1].den),
                                unpack_i(&job->mastering.display_primaries[1][0].num),
                                unpack_i(&job->mastering.display_primaries[1][0].den),
                                unpack_i(&job->mastering.display_primaries[1][1].num),
                                unpack_i(&job->mastering.display_primaries[1][1].den),
                                unpack_i(&job->mastering.display_primaries[2][0].num),
                                unpack_i(&job->mastering.display_primaries[2][0].den),
                                unpack_i(&job->mastering.display_primaries[2][1].num),
                                unpack_i(&job->mastering.display_primaries[2][1].den),
            "WhitePoint", unpack_i(&job->mastering.white_point[0].num),
                          unpack_i(&job->mastering.white_point[0].den),
                          unpack_i(&job->mastering.white_point[1].num),
                          unpack_i(&job->mastering.white_point[1].den),
            "MinLuminance", unpack_i(&job->mastering.min_luminance.num),
                            unpack_i(&job->mastering.min_luminance.den),
            "MaxLuminance", unpack_i(&job->mastering.max_luminance.num),
                            unpack_i(&job->mastering.max_luminance.den),
            "HasPrimaries", unpack_b(&job->mastering.has_primaries),
            "HasLuminance", unpack_b(&job->mastering.has_luminance)
        );
        if (result < 0)
        {
            hb_error("hb_dict_to_job: failed to parse mastering_dict: %s", error.text);
            goto fail;
        }
    }

    // HDR10 content light level metadata.
    if (coll_dict != NULL)
    {
        result = json_unpack_ex(coll_dict, &error, 0,
        // {MaxCLL, MaxFALL}
        "{s:i, s:i}",
            "MaxCLL",  unpack_u(&job->coll.max_cll),
            "MaxFALL", unpack_u(&job->coll.max_fall)
        );
        if (result < 0)
        {
            hb_error("hb_dict_to_job: failed to parse coll_dict: %s", error.text);
            goto fail;
        }
    }

    // process chapter list: entries map by array position onto the
    // title's existing chapters; only the name is applied.
    if (chapter_list != NULL &&
        hb_value_type(chapter_list) == HB_VALUE_TYPE_ARRAY)
    {
        int ii, count;
        hb_dict_t *chapter_dict;
        count = hb_value_array_len(chapter_list);
        for (ii = 0; ii < count; ii++)
        {
            chapter_dict = hb_value_array_get(chapter_list, ii);
            const char *name = NULL;
            result = json_unpack_ex(chapter_dict, &error, 0,
                                    "{s:s}", "Name", unpack_s(&name));
            if (result < 0)
            {
                hb_error("hb_dict_to_job: failed to find chapter name: %s",
                         error.text);
                goto fail;
            }
            if (name != NULL && name[0] != 0)
            {
                hb_chapter_t *chapter;
                chapter = hb_list_item(job->list_chapter, ii);
                if (chapter != NULL)
                {
                    hb_chapter_set_title(chapter, name);
                }
            }
        }
    }

    // process filter list: each entry is {ID, Settings}; IDs outside the
    // known filter range are silently skipped.
    if (filter_list != NULL &&
        hb_value_type(filter_list) == HB_VALUE_TYPE_ARRAY)
    {
        int ii, count;
        hb_dict_t *filter_dict;
        count = hb_value_array_len(filter_list);
        for (ii = 0; ii < count; ii++)
        {
            filter_dict = hb_value_array_get(filter_list, ii);
            int filter_id = -1;
            hb_value_t *filter_settings = NULL;
            result = json_unpack_ex(filter_dict, &error, 0, "{s:i, s?o}",
                                    "ID",       unpack_i(&filter_id),
                                    "Settings", unpack_o(&filter_settings));
            if (result < 0)
            {
                hb_error("hb_dict_to_job: failed to find filter settings: %s",
                         error.text);
                goto fail;
            }
            if (filter_id >= HB_FILTER_FIRST && filter_id <= HB_FILTER_LAST)
            {
                hb_filter_object_t *filter;
                filter = hb_filter_init(filter_id);
                hb_add_filter_dict(job, filter, filter_settings);
            }
        }
    }

    // process audio list
    // Fallback encoder may be given by name or by integer id.
    if (acodec_fallback != NULL)
    {
        if (hb_value_type(acodec_fallback) == HB_VALUE_TYPE_STRING)
        {
            const char *s = hb_value_get_string(acodec_fallback);
            job->acodec_fallback = hb_audio_encoder_get_from_name(s);
        }
        else
        {
            job->acodec_fallback = hb_value_get_int(acodec_fallback);
        }
    }
    // CopyMask accepts an array of names/ids, a comma-separated name
    // string, or a single integer bitmask.
    if (acodec_copy_mask != NULL)
    {
        if (hb_value_type(acodec_copy_mask) == HB_VALUE_TYPE_ARRAY)
        {
            int count, ii;
            count = hb_value_array_len(acodec_copy_mask);
            for (ii = 0; ii < count; ii++)
            {
                hb_value_t *value = hb_value_array_get(acodec_copy_mask, ii);
                if (hb_value_type(value) == HB_VALUE_TYPE_STRING)
                {
                    const char *s = hb_value_get_string(value);
                    job->acodec_copy_mask |= hb_audio_encoder_get_from_name(s);
                }
                else
                {
                    job->acodec_copy_mask |= hb_value_get_int(value);
                }
            }
        }
        else if (hb_value_type(acodec_copy_mask) == HB_VALUE_TYPE_STRING)
        {
            // Split the string at ','
            char *s = strdup(hb_value_get_string(acodec_copy_mask));
            char *cur = s;
            while (cur != NULL && cur[0] != 0)
            {
                char *next = strchr(cur, ',');
                if (next != NULL)
                {
                    *next = 0;
                    next++;
                }
                job->acodec_copy_mask |= hb_audio_encoder_get_from_name(cur);
                cur = next;
            }
            free(s);
        }
        else
        {
            job->acodec_copy_mask = hb_value_get_int(acodec_copy_mask);
        }
    }
    if (audio_list != NULL && hb_value_type(audio_list) == HB_VALUE_TYPE_ARRAY)
    {
        int ii, count;
        hb_dict_t *audio_dict;
        count = hb_value_array_len(audio_list);
        for (ii = 0; ii < count; ii++)
        {
            audio_dict = hb_value_array_get(audio_list, ii);
            hb_audio_config_t audio;
            hb_value_t *acodec = NULL, *samplerate = NULL, *mixdown = NULL;
            hb_value_t *dither = NULL;
            const char *name = NULL;

            hb_audio_config_init(&audio);
            result = json_unpack_ex(audio_dict, &error, 0,
                "{s:i, s?s, s?o, s?F, s?F, s?o, s?b, s?o, s?o, s?i, s?F, s?F}",
                "Track",                unpack_i(&audio.in.track),
                "Name",                 unpack_s(&name),
                "Encoder",              unpack_o(&acodec),
                "Gain",                 unpack_f(&audio.out.gain),
                "DRC",                  unpack_f(&audio.out.dynamic_range_compression),
                "Mixdown",              unpack_o(&mixdown),
                "NormalizeMixLevel",    unpack_b(&audio.out.normalize_mix_level),
                "DitherMethod",         unpack_o(&dither),
                "Samplerate",           unpack_o(&samplerate),
                "Bitrate",              unpack_i(&audio.out.bitrate),
                "Quality",              unpack_f(&audio.out.quality),
                "CompressionLevel",     unpack_f(&audio.out.compression_level));
            if (result < 0)
            {
                hb_error("hb_dict_to_job: failed to find audio settings: %s",
                         error.text);
                goto fail;
            }
            // Encoder, Mixdown, DitherMethod, and Samplerate each accept
            // either a name string or an integer id.
            if (acodec != NULL)
            {
                if (hb_value_type(acodec) == HB_VALUE_TYPE_STRING)
                {
                    const char *s = hb_value_get_string(acodec);
                    audio.out.codec = hb_audio_encoder_get_from_name(s);
                }
                else
                {
                    audio.out.codec = hb_value_get_int(acodec);
                }
            }
            if (mixdown != NULL)
            {
                if (hb_value_type(mixdown) == HB_VALUE_TYPE_STRING)
                {
                    const char *s = hb_value_get_string(mixdown);
                    audio.out.mixdown = hb_mixdown_get_from_name(s);
                }
                else
                {
                    audio.out.mixdown = hb_value_get_int(mixdown);
                }
            }
            if (samplerate != NULL)
            {
                if (hb_value_type(samplerate) == HB_VALUE_TYPE_STRING)
                {
                    const char *s = hb_value_get_string(samplerate);
                    audio.out.samplerate = hb_audio_samplerate_get_from_name(s);
                    // Unknown rate names fall back to "same as source" (0).
                    if (audio.out.samplerate < 0)
                        audio.out.samplerate = 0;
                }
                else
                {
                    audio.out.samplerate = hb_value_get_int(samplerate);
                }
            }
            if (dither != NULL)
            {
                if (hb_value_type(dither) == HB_VALUE_TYPE_STRING)
                {
                    const char *s = hb_value_get_string(dither);
                    audio.out.dither_method = hb_audio_dither_get_from_name(s);
                }
                else
                {
                    audio.out.dither_method = hb_value_get_int(dither);
                }
            }
            if (name != NULL)
            {
                audio.out.name = strdup(name);
            }
            if (audio.in.track >= 0)
            {
                audio.out.track = ii;
                hb_audio_add(job, &audio);
            }
        }
    }

    // Audio sanity checks: drop tracks whose encoder the muxer can't carry.
    int ii;
    for (ii = 0; ii < hb_list_count(job->list_audio); )
    {
        hb_audio_config_t *acfg;
        acfg = hb_list_audio_config_item(job->list_audio, ii);
        if (validate_audio_codec_mux(acfg->out.codec, job->mux, ii))
        {
            // drop the track
            hb_audio_t * audio = hb_list_item(job->list_audio, ii);
            hb_list_rem(job->list_audio, audio);
            hb_audio_close(&audio);
            continue;
        }
        ii++;
    }

    // process subtitle list: entries either select an embedded track
    // (Track >= 0) or import an external file (SRT/SSA).
    if (subtitle_list != NULL &&
        hb_value_type(subtitle_list) == HB_VALUE_TYPE_ARRAY)
    {
        int ii, count;
        hb_dict_t *subtitle_dict;
        count = hb_value_array_len(subtitle_list);
        for (ii = 0; ii < count; ii++)
        {
            subtitle_dict = hb_value_array_get(subtitle_list, ii);
            hb_subtitle_config_t sub_config = {0};
            int track = -1;
            int burn = 0;
            const char *importfile = NULL;
            json_int_t offset = 0;
            const char *name = NULL;

            result = json_unpack_ex(subtitle_dict, &error, 0,
                                    "{s?i, s?s, s?{s:s}, s?{s:s}}",
                                    "Track", unpack_i(&track),
                                    "Name",  unpack_s(&name),
                                    // Support legacy "SRT" import
                                    "SRT",
                                        "Filename", unpack_s(&importfile),
                                    "Import",
                                        "Filename", unpack_s(&importfile));
            if (result < 0)
            {
                hb_error("json unpack failure: %s", error.text);
                hb_job_close(&job);
                return NULL;
            }

            // Embedded subtitle track
            if (track >= 0 && importfile == NULL)
            {
                hb_subtitle_t *subtitle;
                subtitle = hb_list_item(job->title->list_subtitle, track);
                if (subtitle != NULL)
                {
                    sub_config = subtitle->config;
                    if (name != NULL)
                    {
                        sub_config.name = strdup(name);
                    }
                    result = json_unpack_ex(subtitle_dict, &error, 0,
                                            "{s?b, s?b, s?b, s?I}",
                                            "Default", unpack_b(&sub_config.default_track),
                                            "Forced",  unpack_b(&sub_config.force),
                                            "Burn",    unpack_b(&burn),
                                            "Offset",  unpack_I(&offset));
                    if (result < 0)
                    {
                        hb_error("json unpack failure: %s", error.text);
                        hb_job_close(&job);
                        return NULL;
                    }
                    sub_config.offset = offset;
                    sub_config.dest = burn ? RENDERSUB : PASSTHRUSUB;
                    hb_subtitle_add(job, &sub_config, track);
                }
            }
            // External subtitle file import (modern "Import" or legacy "SRT")
            else if (importfile != NULL)
            {
                sub_config.src_filename = strdup(importfile);

                const char * lang = "und";
                const char * srtcodeset = "UTF-8";
                const char * format = "SRT";
                int source = IMPORTSRT;
                result = json_unpack_ex(subtitle_dict, &error, 0,
                    "{s?b, s?b, s?I, "          // Common
                    "s?{s?s, s?s, s?s},"        // Legacy SRT settings
                    "s?{s?s, s?s, s?s, s?s}}",  // Import settings
                    "Default",  unpack_b(&sub_config.default_track),
                    "Burn",     unpack_b(&burn),
                    "Offset",   unpack_I(&offset),
                    "SRT",
                        "Filename", unpack_s(&importfile),
                        "Language", unpack_s(&lang),
                        "Codeset",  unpack_s(&srtcodeset),
                    "Import",
                        "Format",   unpack_s(&format),
                        "Filename", unpack_s(&importfile),
                        "Language", unpack_s(&lang),
                        "Codeset",  unpack_s(&srtcodeset));
                if (result < 0)
                {
                    hb_error("json unpack failure: %s", error.text);
                    hb_job_close(&job);
                    return NULL;
                }
                if (name != NULL)
                {
                    sub_config.name = strdup(name);
                }
                sub_config.offset = offset;
                sub_config.dest = burn ? RENDERSUB : PASSTHRUSUB;
                strncpy(sub_config.src_codeset, srtcodeset, 39);
                sub_config.src_codeset[39] = 0;
                if (!strcasecmp(format, "SSA"))
                {
                    source = IMPORTSSA;
                }
                hb_import_subtitle_add(job, &sub_config, lang, source);
            }
        }
    }

    return job;

fail:
    hb_job_close(&job);
    return NULL;
}
1609
hb_json_to_job(hb_handle_t * h,const char * json_job)1610 hb_job_t* hb_json_to_job( hb_handle_t * h, const char * json_job )
1611 {
1612 hb_dict_t *dict = hb_value_json(json_job);
1613 hb_job_t *job = hb_dict_to_job(h, dict);
1614 hb_value_free(&dict);
1615 return job;
1616 }
1617
1618 /**
1619 * Initialize an hb_job_t and return a json string representation of the job
1620 * @param h - Pointer to hb_handle_t instance that contains the
1621 * specified title_index
1622 * @param title_index - Index of hb_title_t to use for job initialization.
1623 * Index comes from title->index or "Index" key
1624 * in json representation of a title.
1625 */
char* hb_job_init_json(hb_handle_t *h, int title_index)
{
    // Build a default job for the title, serialize it, then release the job;
    // only the json string is handed back to the caller.
    hb_job_t * job      = hb_job_init_by_index(h, title_index);
    char     * json_job = hb_job_to_json(job);

    hb_job_close(&job);

    return json_job;
}
1633
/**
 * Initialize a job dict from a preset and return it as a json string.
 * @param h           - libhb instance containing title_index
 * @param title_index - title to initialize the job from
 * @param json_preset - json string representation of the preset
 * @return json job string (caller frees) or NULL
 */
char* hb_preset_job_init_json(hb_handle_t *h, int title_index,
                              const char *json_preset)
{
    hb_dict_t * preset_dict = hb_value_json(json_preset);
    hb_dict_t * job_dict    = hb_preset_job_init(h, title_index, preset_dict);
    char      * json_job    = hb_value_get_json(job_dict);

    hb_value_free(&job_dict);
    hb_value_free(&preset_dict);

    return json_job;
}
1646
1647 /**
1648 * Add a json string job to the hb queue
1649 * @param h - Pointer to hb_handle_t instance that job is added to
1650 * @param json_job - json string representation of job to add
1651 */
/**
 * Add a json string job to the hb queue
 * @param h        - Pointer to hb_handle_t instance that job is added to
 * @param json_job - json string representation of job to add
 * @return result of hb_add()
 */
int hb_add_json( hb_handle_t * h, const char * json_job )
{
    // Zero-initialize: hb_add() is handed this stack struct, and only
    // the json field carries the job description.  Leaving the remaining
    // fields indeterminate invites reads of garbage if hb_add() (or its
    // callees) ever touch another field; zero is a well-defined state.
    hb_job_t job = {0};

    job.json = json_job;
    return hb_add(h, &job);
}
1659
1660
1661 /**
1662 * Calculates destination width and height for anamorphic content
1663 *
1664 * Returns geometry as json string {Width, Height, PAR {Num, Den}}
1665 * @param json_param - contains source and destination geometry params.
1666 * This encapsulates the values that are in
1667 * hb_geometry_t and hb_geometry_settings_t
1668 */
char* hb_set_anamorphic_size_json(const char * json_param)
{
    int json_result;
    json_error_t error;
    hb_dict_t * dict;
    hb_geometry_t geo_result;
    hb_geometry_t src;
    hb_geometry_settings_t ui_geo;

    // Clear dest geometry since some fields are optional.
    memset(&ui_geo, 0, sizeof(ui_geo));

    // NOTE: the variadic arguments must stay in exact lock-step with the
    // format string; "?" marks optional keys whose destinations keep their
    // zeroed defaults when absent.  All of SourceGeometry is required, so
    // 'src' needs no pre-clearing.
    dict = hb_value_json(json_param);
    json_result = json_unpack_ex(dict, &error, 0,
    "{"
    // SourceGeometry
    //  {Width, Height, PAR {Num, Den}}
    "s:{s:i, s:i, s:{s:i, s:i}},"
    // DestSettings
    "s:{"
      // Geometry {Width, Height, PAR {Num, Den}},
      "s:{s:i, s:i, s:{s:i, s:i}},"
      // AnamorphicMode, Flags, Keep, ItuPAR, Modulus, MaxWidth, MaxHeight,
      // DisplayWidth, DisplayHeight
      "s:i, s?i, s?i, s?b, s?i, s?i, s?i, s?i, s?i"
      // Crop [Top, Bottom, Left, Right]
      "s?[iiii]"
      // Pad [Top, Bottom, Left, Right]
      "s?[iiii]"
    " }"
    "}",
    "SourceGeometry",
        "Width",                unpack_i(&src.width),
        "Height",               unpack_i(&src.height),
        "PAR",
            "Num",              unpack_i(&src.par.num),
            "Den",              unpack_i(&src.par.den),
    "DestSettings",
        "Geometry",
            "Width",            unpack_i(&ui_geo.geometry.width),
            "Height",           unpack_i(&ui_geo.geometry.height),
            "PAR",
                "Num",          unpack_i(&ui_geo.geometry.par.num),
                "Den",          unpack_i(&ui_geo.geometry.par.den),
        "AnamorphicMode",       unpack_i(&ui_geo.mode),
        "Flags",                unpack_i(&ui_geo.flags),
        "Keep",                 unpack_i(&ui_geo.keep),
        "ItuPAR",               unpack_b(&ui_geo.itu_par),
        "Modulus",              unpack_i(&ui_geo.modulus),
        "MaxWidth",             unpack_i(&ui_geo.maxWidth),
        "MaxHeight",            unpack_i(&ui_geo.maxHeight),
        "DisplayWidth",         unpack_i(&ui_geo.displayWidth),
        "DisplayHeight",        unpack_i(&ui_geo.displayHeight),
        "Crop",                 unpack_i(&ui_geo.crop[0]),
                                unpack_i(&ui_geo.crop[1]),
                                unpack_i(&ui_geo.crop[2]),
                                unpack_i(&ui_geo.crop[3]),
        "Pad",                  unpack_i(&ui_geo.pad[0]),
                                unpack_i(&ui_geo.pad[1]),
                                unpack_i(&ui_geo.pad[2]),
                                unpack_i(&ui_geo.pad[3])
    );
    hb_value_free(&dict);

    if (json_result < 0)
    {
        hb_error("json unpack failure: %s", error.text);
        return NULL;
    }

    // Run the anamorphic calculation itself.
    hb_set_anamorphic_size2(&src, &ui_geo, &geo_result);

    // Serialize the resulting geometry back to json.
    dict = json_pack_ex(&error, 0,
        "{s:o, s:o, s:{s:o, s:o}}",
            "Width",        hb_value_int(geo_result.width),
            "Height",       hb_value_int(geo_result.height),
            "PAR",
                "Num",      hb_value_int(geo_result.par.num),
                "Den",      hb_value_int(geo_result.par.den));
    if (dict == NULL)
    {
        hb_error("hb_set_anamorphic_size_json: pack failure: %s", error.text);
        return NULL;
    }
    char *result = hb_value_get_json(dict);
    hb_value_free(&dict);

    return result;
}
1758
/**
 * Render a preview image for a title and return it as a json string
 * {Format, Width, Height, Planes [{Width, Height, Stride, HeightStride,
 * Size, Data}]}.  Plane data is base64 encoded.
 * @param h          - libhb instance containing the scanned title
 * @param json_param - json dict {Title, Preview, Deinterlace, DestSettings}
 * @return json string (caller frees) or NULL on failure
 */
char* hb_get_preview_json(hb_handle_t * h, const char *json_param)
{
    hb_image_t *image;
    int ii, title_idx, preview_idx, deinterlace = 0;

    int json_result;
    json_error_t error;
    hb_dict_t * dict;
    hb_geometry_settings_t settings;

    // Clear dest geometry since some fields are optional.
    memset(&settings, 0, sizeof(settings));

    // NOTE: the variadic arguments must stay in lock-step with the format
    // string; "?" marks optional keys whose destinations keep their defaults.
    dict = hb_value_json(json_param);
    json_result = json_unpack_ex(dict, &error, 0,
    "{"
    // Title, Preview, Deinterlace
    "s:i, s:i, s?b,"
    // DestSettings
    "s:{"
      // Geometry {Width, Height, PAR {Num, Den}},
      "s:{s:i, s:i, s:{s:i, s:i}},"
      // AnamorphicMode, Keep, ItuPAR, Modulus, MaxWidth, MaxHeight,
      "s:i, s?i, s?b, s:i, s:i, s:i,"
      // Crop [Top, Bottom, Left, Right]
      "s?[iiii]"
    " }"
    "}",
    "Title",                    unpack_i(&title_idx),
    "Preview",                  unpack_i(&preview_idx),
    "Deinterlace",              unpack_b(&deinterlace),
    "DestSettings",
        "Geometry",
            "Width",            unpack_i(&settings.geometry.width),
            "Height",           unpack_i(&settings.geometry.height),
            "PAR",
                "Num",          unpack_i(&settings.geometry.par.num),
                "Den",          unpack_i(&settings.geometry.par.den),
        "AnamorphicMode",       unpack_i(&settings.mode),
        "Keep",                 unpack_i(&settings.keep),
        "ItuPAR",               unpack_b(&settings.itu_par),
        "Modulus",              unpack_i(&settings.modulus),
        "MaxWidth",             unpack_i(&settings.maxWidth),
        "MaxHeight",            unpack_i(&settings.maxHeight),
        "Crop",                 unpack_i(&settings.crop[0]),
                                unpack_i(&settings.crop[1]),
                                unpack_i(&settings.crop[2]),
                                unpack_i(&settings.crop[3])
    );
    hb_value_free(&dict);

    if (json_result < 0)
    {
        hb_error("preview params: json unpack failure: %s", error.text);
        return NULL;
    }

    image = hb_get_preview2(h, title_idx, preview_idx, &settings, deinterlace);
    if (image == NULL)
    {
        return NULL;
    }

    dict = json_pack_ex(&error, 0,
        "{s:o, s:o, s:o}",
            "Format",       hb_value_int(image->format),
            "Width",        hb_value_int(image->width),
            "Height",       hb_value_int(image->height));
    if (dict == NULL)
    {
        hb_error("hb_get_preview_json: pack failure: %s", error.text);
        // Fix: don't leak the rendered image on the error path.
        hb_image_close(&image);
        return NULL;
    }

    hb_value_array_t * planes = hb_value_array_init();
    for (ii = 0; ii < 4; ii++)
    {
        int base64size = AV_BASE64_SIZE(image->plane[ii].size);
        if (image->plane[ii].size <= 0 || base64size <= 0)
            continue;

        char *plane_base64 = calloc(base64size, 1);
        if (plane_base64 == NULL)
        {
            // Fix: check the allocation and release everything on failure.
            hb_error("hb_get_preview_json: allocation failure");
            hb_value_free(&planes);
            hb_value_free(&dict);
            hb_image_close(&image);
            return NULL;
        }
        av_base64_encode(plane_base64, base64size,
                         image->plane[ii].data, image->plane[ii].size);

        base64size = strlen(plane_base64);
        hb_dict_t *plane_dict;
        plane_dict = json_pack_ex(&error, 0,
            "{s:o, s:o, s:o, s:o, s:o, s:o}",
            "Width",        hb_value_int(image->plane[ii].width),
            "Height",       hb_value_int(image->plane[ii].height),
            "Stride",       hb_value_int(image->plane[ii].stride),
            "HeightStride", hb_value_int(image->plane[ii].height_stride),
            "Size",         hb_value_int(base64size),
            "Data",         hb_value_string(plane_base64)
        );
        // Fix: hb_value_string() copies the data, so the encode buffer was
        // previously leaked on every iteration; release it here.
        free(plane_base64);
        if (plane_dict == NULL)
        {
            hb_error("plane_dict: json pack failure: %s", error.text);
            // Fix: release everything on the error path instead of leaking.
            hb_value_free(&planes);
            hb_value_free(&dict);
            hb_image_close(&image);
            return NULL;
        }
        hb_value_array_append(planes, plane_dict);
    }
    hb_dict_set(dict, "Planes", planes);
    hb_image_close(&image);

    char *result = hb_value_get_json(dict);
    hb_value_free(&dict);

    return result;
}
1870
hb_get_preview3_json(hb_handle_t * h,int picture,const char * json_job)1871 hb_image_t * hb_get_preview3_json(hb_handle_t * h, int picture, const char *json_job)
1872 {
1873 hb_image_t * image;
1874 hb_dict_t * job_dict;
1875
1876 job_dict = hb_value_json(json_job);
1877 image = hb_get_preview3(h, picture, job_dict);
1878 hb_value_free(&job_dict);
1879
1880 return image;
1881 }
1882
char* hb_get_preview_params_json(int title_idx, int preview_idx,
                                 int deinterlace, hb_geometry_settings_t *settings)
{
    json_error_t error;
    hb_dict_t * dict;

    // Serialize the preview request parameters.  The variadic arguments
    // must stay in lock-step with the format string below; each "o" steals
    // the reference returned by hb_value_int()/hb_value_bool().
    // (Commas/colons in jansson format strings are optional separators,
    // so the missing comma before "s:[oooo]" is harmless.)
    dict = json_pack_ex(&error, 0,
        "{"
        "s:o, s:o, s:o,"
        "s:{"
        "   s:{s:o, s:o, s:{s:o, s:o}},"
        "   s:o, s:o, s:o, s:o, s:o, s:o"
        "   s:[oooo]"
        "  }"
        "}",
        "Title",                hb_value_int(title_idx),
        "Preview",              hb_value_int(preview_idx),
        "Deinterlace",          hb_value_bool(deinterlace),
        "DestSettings",
            "Geometry",
                "Width",        hb_value_int(settings->geometry.width),
                "Height",       hb_value_int(settings->geometry.height),
                "PAR",
                    "Num",      hb_value_int(settings->geometry.par.num),
                    "Den",      hb_value_int(settings->geometry.par.den),
            "AnamorphicMode",   hb_value_int(settings->mode),
            "Keep",             hb_value_int(settings->keep),
            "ItuPAR",           hb_value_bool(settings->itu_par),
            "Modulus",          hb_value_int(settings->modulus),
            "MaxWidth",         hb_value_int(settings->maxWidth),
            "MaxHeight",        hb_value_int(settings->maxHeight),
            "Crop",             hb_value_int(settings->crop[0]),
                                hb_value_int(settings->crop[1]),
                                hb_value_int(settings->crop[2]),
                                hb_value_int(settings->crop[3])
    );
    if (dict == NULL)
    {
        hb_error("hb_get_preview_params_json: pack failure: %s", error.text);
        return NULL;
    }

    char *result = hb_value_get_json(dict);
    hb_value_free(&dict);

    return result;
}
1930
hb_json_to_image(char * json_image)1931 hb_image_t* hb_json_to_image(char *json_image)
1932 {
1933 int json_result;
1934 json_error_t error;
1935 hb_dict_t * dict;
1936 int pix_fmt, width, height;
1937 dict = hb_value_json(json_image);
1938 json_result = json_unpack_ex(dict, &error, 0,
1939 "{"
1940 // Format, Width, Height
1941 "s:i, s:i, s:i,"
1942 "}",
1943 "Format", unpack_i(&pix_fmt),
1944 "Width", unpack_i(&width),
1945 "Height", unpack_b(&height)
1946 );
1947 if (json_result < 0)
1948 {
1949 hb_error("image: json unpack failure: %s", error.text);
1950 hb_value_free(&dict);
1951 return NULL;
1952 }
1953
1954 hb_image_t *image = hb_image_init(pix_fmt, width, height);
1955 if (image == NULL)
1956 {
1957 hb_value_free(&dict);
1958 return NULL;
1959 }
1960
1961 hb_value_array_t * planes = NULL;
1962 json_result = json_unpack_ex(dict, &error, 0,
1963 "{s:o}", "Planes", unpack_o(&planes));
1964 if (json_result < 0)
1965 {
1966 hb_error("image::planes: json unpack failure: %s", error.text);
1967 hb_value_free(&dict);
1968 return image;
1969 }
1970 if (hb_value_type(planes) == HB_VALUE_TYPE_ARRAY)
1971 {
1972 int ii, count;
1973 hb_dict_t *plane_dict;
1974 count = hb_value_array_len(planes);
1975 for (ii = 0; ii < count; ii++)
1976 {
1977 plane_dict = hb_value_array_get(planes, ii);
1978 const char *data = NULL;
1979 int size;
1980 json_result = json_unpack_ex(plane_dict, &error, 0,
1981 "{s:i, s:s}",
1982 "Size", unpack_i(&size),
1983 "Data", unpack_s(&data));
1984 if (json_result < 0)
1985 {
1986 hb_error("image::plane::data: json unpack failure: %s", error.text);
1987 hb_value_free(&dict);
1988 return image;
1989 }
1990 if (image->plane[ii].size > 0 && data != NULL)
1991 {
1992 av_base64_decode(image->plane[ii].data, data,
1993 image->plane[ii].size);
1994 }
1995 }
1996 }
1997 hb_value_free(&dict);
1998
1999 return image;
2000 }
2001