/*
 * VP9 HW decode acceleration through VDPAU
 *
 * Copyright (c) 2019 Manoj Gupta Bonda
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <vdpau/vdpau.h>
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "internal.h"
#include "vp9data.h"
#include "vp9dec.h"
#include "hwconfig.h"
#include "vdpau.h"
#include "vdpau_internal.h"

static int vdpau_vp9_start_frame(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
{
    VP9Context *s = avctx->priv_data;
    VP9SharedContext *h = &(s->s);
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
    int i;

    VdpPictureInfoVP9 *info = &pic_ctx->info.vp9;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!pixdesc) {
        return AVERROR(EINVAL);
    }

    info->width = avctx->width;
    info->height = avctx->height;
    /* fill the VdpPictureInfoVP9 struct */
    info->lastReference  = VDP_INVALID_HANDLE;
    info->goldenReference = VDP_INVALID_HANDLE;
    info->altReference = VDP_INVALID_HANDLE;

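    /* Map the VP9 last/golden/altref reference slots, selected by the frame
     * header's ref indices, to the VDPAU surfaces backing those frames. */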
    if (h->refs[h->h.refidx[0]].f && h->refs[h->h.refidx[0]].f->private_ref) {
        info->lastReference               = ff_vdpau_get_surface_id(h->refs[h->h.refidx[0]].f);
    }
    if (h->refs[h->h.refidx[1]].f && h->refs[h->h.refidx[1]].f->private_ref) {
        info->goldenReference             = ff_vdpau_get_surface_id(h->refs[h->h.refidx[1]].f);
    }
    if (h->refs[h->h.refidx[2]].f && h->refs[h->h.refidx[2]].f->private_ref) {
        info->altReference                = ff_vdpau_get_surface_id(h->refs[h->h.refidx[2]].f);
    }

    info->profile                  = h->h.profile;
    info->frameContextIdx          = h->h.framectxid;
    info->keyFrame                 = h->h.keyframe;
    info->showFrame                = !h->h.invisible;
    info->errorResilient           = h->h.errorres;
    info->frameParallelDecoding    = h->h.parallelmode;

    info->subSamplingX             = pixdesc->log2_chroma_w;
    info->subSamplingY             = pixdesc->log2_chroma_h;

    info->intraOnly                = h->h.intraonly;
    info->allowHighPrecisionMv     = h->h.keyframe ? 0 : h->h.highprecisionmvs;
    info->refreshEntropyProbs      = h->h.refreshctx;

    info->bitDepthMinus8Luma       = pixdesc->comp[0].depth - 8;
    info->bitDepthMinus8Chroma     = pixdesc->comp[1].depth - 8;

    info->loopFilterLevel          = h->h.filter.level;
    info->loopFilterSharpness      = h->h.filter.sharpness;
    info->modeRefLfEnabled         = h->h.lf_delta.enabled;

    info->log2TileColumns          = h->h.tiling.log2_tile_cols;
    info->log2TileRows             = h->h.tiling.log2_tile_rows;

    info->segmentEnabled           = h->h.segmentation.enabled;
    info->segmentMapUpdate         = h->h.segmentation.update_map;
    info->segmentMapTemporalUpdate = h->h.segmentation.temporal;
    info->segmentFeatureMode       = h->h.segmentation.absolute_vals;

    info->qpYAc                    = h->h.yac_qi;
    info->qpYDc                    = h->h.ydc_qdelta;
    info->qpChDc                   = h->h.uvdc_qdelta;
    info->qpChAc                   = h->h.uvac_qdelta;

    info->resetFrameContext        = h->h.resetctx;
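    /* h->h.filtermode uses FFmpeg's internal FilterMode ordering; the XOR
     * below swaps values 0 and 1 to recover the VP9 bitstream ordering. */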
    info->mcompFilterType          = h->h.filtermode ^ (h->h.filtermode <= 1);
    info->uncompressedHeaderSize   = h->h.uncompressed_header_size;
    info->compressedHeaderSize     = h->h.compressed_header_size;
    info->refFrameSignBias[0]      = 0;

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbModeLfDelta); i++)
        info->mbModeLfDelta[i] = h->h.lf_delta.mode[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbRefLfDelta); i++)
        info->mbRefLfDelta[i] = h->h.lf_delta.ref[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->mbSegmentTreeProbs); i++)
        info->mbSegmentTreeProbs[i] = h->h.segmentation.prob[i];

    for (i = 0; i < FF_ARRAY_ELEMS(info->activeRefIdx); i++) {
        info->activeRefIdx[i] = h->h.refidx[i];
        info->segmentPredProbs[i] = h->h.segmentation.pred_prob[i];
        info->refFrameSignBias[i + 1] = h->h.signbias[i];
    }

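    /* Per-segment feature enable flags and data, in VP9 feature order:
     * alt quantizer, alt loop filter, reference frame, skip (the skip
     * feature carries no data, hence the trailing 0). */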
    for (i = 0; i < FF_ARRAY_ELEMS(info->segmentFeatureEnable); i++) {
        info->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
        info->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
        info->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
        info->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;

        info->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
        info->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
        info->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
        info->segmentFeatureData[i][3] = 0;
    }

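    /* Translate AVColorSpace into the color_space code carried in the VP9
     * uncompressed frame header (0 = unknown, ..., 7 = sRGB). */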
    switch (avctx->colorspace) {
    default:
    case AVCOL_SPC_UNSPECIFIED:
        info->colorSpace = 0;
        break;
    case AVCOL_SPC_BT470BG:
        info->colorSpace = 1;
        break;
    case AVCOL_SPC_BT709:
        info->colorSpace = 2;
        break;
    case AVCOL_SPC_SMPTE170M:
        info->colorSpace = 3;
        break;
    case AVCOL_SPC_SMPTE240M:
        info->colorSpace = 4;
        break;
    case AVCOL_SPC_BT2020_NCL:
        info->colorSpace = 5;
        break;
    case AVCOL_SPC_RESERVED:
        info->colorSpace = 6;
        break;
    case AVCOL_SPC_RGB:
        info->colorSpace = 7;
        break;
    }

    return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
}

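/* Each frame's data is prefixed with a 00 00 01 start code before being
 * queued as a VDPAU bitstream buffer. */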
static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };

static int vdpau_vp9_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
{
    VP9SharedContext *h = avctx->priv_data;
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;

    int val;

    val = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, 3);
    if (val)
        return val;

    val = ff_vdpau_add_buffer(pic_ctx, buffer, size);
    if (val)
        return val;

    return 0;
}

static int vdpau_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    VP9Frame pic = h->frames[CUR_FRAME];
    struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;

    int val;

    val = ff_vdpau_common_end_frame(avctx, pic.tf.f, pic_ctx);
    if (val < 0)
        return val;

    return 0;
}

static int vdpau_vp9_init(AVCodecContext *avctx)
{
    VdpDecoderProfile profile;
    uint32_t level = avctx->level;

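    /* Map FFmpeg's VP9 profile constants to the corresponding VDPAU decoder
     * profiles; any other profile is rejected as unsupported. */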
    switch (avctx->profile) {
    case FF_PROFILE_VP9_0:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_0;
        break;
    case FF_PROFILE_VP9_1:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_1;
        break;
    case FF_PROFILE_VP9_2:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_2;
        break;
    case FF_PROFILE_VP9_3:
        profile = VDP_DECODER_PROFILE_VP9_PROFILE_3;
        break;
    default:
        return AVERROR(ENOTSUP);
    }

    return ff_vdpau_common_init(avctx, profile, level);
}

const AVHWAccel ff_vp9_vdpau_hwaccel = {
    .name           = "vp9_vdpau",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_VDPAU,
    .start_frame    = vdpau_vp9_start_frame,
    .end_frame      = vdpau_vp9_end_frame,
    .decode_slice   = vdpau_vp9_decode_slice,
    .frame_priv_data_size = sizeof(struct vdpau_picture_context),
    .init           = vdpau_vp9_init,
    .uninit         = ff_vdpau_common_uninit,
    .frame_params   = ff_vdpau_common_frame_params,
    .priv_data_size = sizeof(VDPAUContext),
    .caps_internal  = HWACCEL_CAP_ASYNC_SAFE,
};