/*
 * This file is part of mpv.
 *
 * mpv is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * mpv is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with mpv.  If not, see <http://www.gnu.org/licenses/>.
 */
17
18 #include "config.h"
19
20 #include <stddef.h>
21 #include <stdbool.h>
22 #include <pthread.h>
23 #include <assert.h>
24
25 #include <libavutil/buffer.h>
26 #include <libavutil/hwcontext.h>
27 #include <libavutil/mem.h>
28
29 #include "mpv_talloc.h"
30
31 #include "common/common.h"
32
33 #include "fmt-conversion.h"
34 #include "mp_image.h"
35 #include "mp_image_pool.h"
36
// Global lock protecting the per-image "struct image_flags" state. A process
// lock is needed because pool-allocated image references may be unreferenced
// from arbitrary threads (see unref_image()).
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
#define pool_lock() pthread_mutex_lock(&pool_mutex)
#define pool_unlock() pthread_mutex_unlock(&pool_mutex)
40
// Thread-safety: the pool itself is not thread-safe, but pool-allocated images
// can be referenced and unreferenced from other threads. (As long as the image
// destructors are thread-safe.)

struct mp_image_pool {
    struct mp_image **images;   // images owned by the pool (free or in use)
    int num_images;

    // Format/size of the most recent allocation; used by mp_image_pool_get()
    // to flush the pool when the requested parameters change.
    int fmt, w, h;

    // Optional custom allocator (see mp_image_pool_set_allocator()).
    mp_image_allocator allocator;
    void *allocator_ctx;

    bool use_lru;               // prefer the least recently used free image
    unsigned int lru_counter;   // monotonic "clock" stamped on each allocation
};
57
// Used to gracefully handle the case when the pool is freed while image
// references allocated from the image pool are still held by someone.
// One instance is attached to every pooled image via mp_image.priv; all
// fields are protected by pool_mutex.
struct image_flags {
    // If both of these are false, the image must be freed.
    bool referenced;            // outside mp_image reference exists
    bool pool_alive;            // the mp_image_pool references this
    unsigned int order;         // for LRU allocation (basically a timestamp)
};
66
// talloc destructor for the pool: release every image it still owns.
static void image_pool_destructor(void *ptr)
{
    mp_image_pool_clear(ptr);
}
72
73 // If tparent!=NULL, set it as talloc parent for the pool.
mp_image_pool_new(void * tparent)74 struct mp_image_pool *mp_image_pool_new(void *tparent)
75 {
76 struct mp_image_pool *pool = talloc_ptrtype(tparent, pool);
77 talloc_set_destructor(pool, image_pool_destructor);
78 *pool = (struct mp_image_pool) {0};
79 return pool;
80 }
81
mp_image_pool_clear(struct mp_image_pool * pool)82 void mp_image_pool_clear(struct mp_image_pool *pool)
83 {
84 for (int n = 0; n < pool->num_images; n++) {
85 struct mp_image *img = pool->images[n];
86 struct image_flags *it = img->priv;
87 bool referenced;
88 pool_lock();
89 assert(it->pool_alive);
90 it->pool_alive = false;
91 referenced = it->referenced;
92 pool_unlock();
93 if (!referenced)
94 talloc_free(img);
95 }
96 pool->num_images = 0;
97 }
98
99 // This is the only function that is allowed to run in a different thread.
100 // (Consider passing an image to another thread, which frees it.)
unref_image(void * opaque,uint8_t * data)101 static void unref_image(void *opaque, uint8_t *data)
102 {
103 struct mp_image *img = opaque;
104 struct image_flags *it = img->priv;
105 bool alive;
106 pool_lock();
107 assert(it->referenced);
108 it->referenced = false;
109 alive = it->pool_alive;
110 pool_unlock();
111 if (!alive)
112 talloc_free(img);
113 }
114
// Return a new image of given format/size. Unlike mp_image_pool_get(), this
// returns NULL if there is no free image of this format/size.
struct mp_image *mp_image_pool_get_no_alloc(struct mp_image_pool *pool, int fmt,
                                            int w, int h)
{
    struct mp_image *new = NULL;
    pool_lock();
    // Scan for an unreferenced image matching the requested parameters. In
    // LRU mode, keep looking for the candidate with the smallest "order"
    // (least recently used); otherwise take the first match.
    for (int n = 0; n < pool->num_images; n++) {
        struct mp_image *img = pool->images[n];
        struct image_flags *img_it = img->priv;
        assert(img_it->pool_alive);
        if (!img_it->referenced) {
            if (img->imgfmt == fmt && img->w == w && img->h == h) {
                if (pool->use_lru) {
                    struct image_flags *new_it = new ? new->priv : NULL;
                    if (!new_it || new_it->order > img_it->order)
                        new = img;
                } else {
                    new = img;
                    break;
                }
            }
        }
    }
    pool_unlock();
    if (!new)
        return NULL;

    // Reference the new image. Since mp_image_pool is not declared thread-safe,
    // and unreffing images from other threads does not allocate new images,
    // no synchronization is required here.
    for (int p = 0; p < MP_MAX_PLANES; p++)
        assert(!!new->bufs[p] == !p); // only 1 AVBufferRef

    struct mp_image *ref = mp_image_new_dummy_ref(new);

    // This assumes the buffer is at this point exclusively owned by us: we
    // can't track whether the buffer is unique otherwise.
    // (av_buffer_is_writable() checks the refcount of the new buffer only.)
    int flags = av_buffer_is_writable(new->bufs[0]) ? 0 : AV_BUFFER_FLAG_READONLY;
    // Wrap the pooled buffer in a fresh AVBufferRef whose free callback
    // (unref_image) returns the image to the pool instead of freeing it.
    ref->bufs[0] = av_buffer_create(new->bufs[0]->data, new->bufs[0]->size,
                                    unref_image, new, flags);
    if (!ref->bufs[0]) {
        talloc_free(ref);
        return NULL;
    }

    struct image_flags *it = new->priv;
    assert(!it->referenced && it->pool_alive);
    it->referenced = true;
    it->order = ++pool->lru_counter;
    return ref;
}
168
mp_image_pool_add(struct mp_image_pool * pool,struct mp_image * new)169 void mp_image_pool_add(struct mp_image_pool *pool, struct mp_image *new)
170 {
171 struct image_flags *it = talloc_ptrtype(new, it);
172 *it = (struct image_flags) { .pool_alive = true };
173 new->priv = it;
174 MP_TARRAY_APPEND(pool, pool->images, pool->num_images, new);
175 }
176
177 // Return a new image of given format/size. The only difference to
178 // mp_image_alloc() is that there is a transparent mechanism to recycle image
179 // data allocations through this pool.
180 // If pool==NULL, mp_image_alloc() is called (for convenience).
181 // The image can be free'd with talloc_free().
182 // Returns NULL on OOM.
mp_image_pool_get(struct mp_image_pool * pool,int fmt,int w,int h)183 struct mp_image *mp_image_pool_get(struct mp_image_pool *pool, int fmt,
184 int w, int h)
185 {
186 if (!pool)
187 return mp_image_alloc(fmt, w, h);
188 struct mp_image *new = mp_image_pool_get_no_alloc(pool, fmt, w, h);
189 if (!new) {
190 if (fmt != pool->fmt || w != pool->w || h != pool->h)
191 mp_image_pool_clear(pool);
192 pool->fmt = fmt;
193 pool->w = w;
194 pool->h = h;
195 if (pool->allocator) {
196 new = pool->allocator(pool->allocator_ctx, fmt, w, h);
197 } else {
198 new = mp_image_alloc(fmt, w, h);
199 }
200 if (!new)
201 return NULL;
202 mp_image_pool_add(pool, new);
203 new = mp_image_pool_get_no_alloc(pool, fmt, w, h);
204 }
205 return new;
206 }
207
208 // Like mp_image_new_copy(), but allocate the image out of the pool.
209 // If pool==NULL, a plain copy is made (for convenience).
210 // Returns NULL on OOM.
mp_image_pool_new_copy(struct mp_image_pool * pool,struct mp_image * img)211 struct mp_image *mp_image_pool_new_copy(struct mp_image_pool *pool,
212 struct mp_image *img)
213 {
214 struct mp_image *new = mp_image_pool_get(pool, img->imgfmt, img->w, img->h);
215 if (new) {
216 mp_image_copy(new, img);
217 mp_image_copy_attributes(new, img);
218 }
219 return new;
220 }
221
// Like mp_image_make_writeable(), but if a copy has to be made, allocate it
// out of the pool.
// If pool==NULL, mp_image_make_writeable() is called (for convenience).
// Returns false on failure (see mp_image_make_writeable()).
bool mp_image_pool_make_writeable(struct mp_image_pool *pool,
                                  struct mp_image *img)
{
    if (mp_image_is_writeable(img))
        return true;

    // Copy the data into a fresh (exclusively owned) pooled image, then move
    // that data into img, replacing the shared buffers.
    struct mp_image *copy = mp_image_pool_new_copy(pool, img);
    if (!copy)
        return false;
    mp_image_steal_data(img, copy);
    assert(mp_image_is_writeable(img));
    return true;
}
238
239 // Call cb(cb_data, fmt, w, h) to allocate an image. Note that the resulting
240 // image must use only 1 AVBufferRef. The returned image must also be owned
241 // exclusively by the image pool, otherwise mp_image_is_writeable() will not
242 // work due to FFmpeg restrictions.
mp_image_pool_set_allocator(struct mp_image_pool * pool,mp_image_allocator cb,void * cb_data)243 void mp_image_pool_set_allocator(struct mp_image_pool *pool,
244 mp_image_allocator cb, void *cb_data)
245 {
246 pool->allocator = cb;
247 pool->allocator_ctx = cb_data;
248 }
249
// Put into LRU mode. (Likely better for hwaccel surfaces, but worse for memory.)
// Affects which free image mp_image_pool_get_no_alloc() picks for reuse.
void mp_image_pool_set_lru(struct mp_image_pool *pool)
{
    pool->use_lru = true;
}
255
256 // Return the sw image format mp_image_hw_download() would use. This can be
257 // different from src->params.hw_subfmt in obscure cases.
mp_image_hw_download_get_sw_format(struct mp_image * src)258 int mp_image_hw_download_get_sw_format(struct mp_image *src)
259 {
260 if (!src->hwctx)
261 return 0;
262
263 // Try to find the first format which we can apparently use.
264 int imgfmt = 0;
265 enum AVPixelFormat *fmts;
266 if (av_hwframe_transfer_get_formats(src->hwctx,
267 AV_HWFRAME_TRANSFER_DIRECTION_FROM, &fmts, 0) < 0)
268 return 0;
269 for (int n = 0; fmts[n] != AV_PIX_FMT_NONE; n++) {
270 imgfmt = pixfmt2imgfmt(fmts[n]);
271 if (imgfmt)
272 break;
273 }
274 av_free(fmts);
275
276 return imgfmt;
277 }
278
279 // Copies the contents of the HW surface src to system memory and retuns it.
280 // If swpool is not NULL, it's used to allocate the target image.
281 // src must be a hw surface with a AVHWFramesContext attached.
282 // The returned image is cropped as needed.
283 // Returns NULL on failure.
mp_image_hw_download(struct mp_image * src,struct mp_image_pool * swpool)284 struct mp_image *mp_image_hw_download(struct mp_image *src,
285 struct mp_image_pool *swpool)
286 {
287 int imgfmt = mp_image_hw_download_get_sw_format(src);
288 if (!imgfmt)
289 return NULL;
290
291 assert(src->hwctx);
292 AVHWFramesContext *fctx = (void *)src->hwctx->data;
293
294 struct mp_image *dst =
295 mp_image_pool_get(swpool, imgfmt, fctx->width, fctx->height);
296 if (!dst)
297 return NULL;
298
299 // Target image must be writable, so unref it.
300 AVFrame *dstav = mp_image_to_av_frame_and_unref(dst);
301 if (!dstav)
302 return NULL;
303
304 AVFrame *srcav = mp_image_to_av_frame(src);
305 if (!srcav) {
306 av_frame_unref(dstav);
307 return NULL;
308 }
309
310 int res = av_hwframe_transfer_data(dstav, srcav, 0);
311 av_frame_free(&srcav);
312 dst = mp_image_from_av_frame(dstav);
313 av_frame_free(&dstav);
314 if (res >= 0 && dst) {
315 mp_image_set_size(dst, src->w, src->h);
316 mp_image_copy_attributes(dst, src);
317 } else {
318 mp_image_unrefp(&dst);
319 }
320 return dst;
321 }
322
mp_image_hw_upload(struct mp_image * hw_img,struct mp_image * src)323 bool mp_image_hw_upload(struct mp_image *hw_img, struct mp_image *src)
324 {
325 if (hw_img->w != src->w || hw_img->h != src->h)
326 return false;
327
328 if (!hw_img->hwctx || src->hwctx)
329 return false;
330
331 bool ok = false;
332 AVFrame *dstav = NULL;
333 AVFrame *srcav = NULL;
334
335 // This means the destination image will not be "writable", which would be
336 // a pain if Libav enforced this - fortunately it doesn't care. We can
337 // transfer data to it even if there are multiple refs.
338 dstav = mp_image_to_av_frame(hw_img);
339 if (!dstav)
340 goto done;
341
342 srcav = mp_image_to_av_frame(src);
343 if (!srcav)
344 goto done;
345
346 ok = av_hwframe_transfer_data(dstav, srcav, 0) >= 0;
347
348 done:
349 av_frame_unref(srcav);
350 av_frame_unref(dstav);
351
352 if (ok)
353 mp_image_copy_attributes(hw_img, src);
354 return ok;
355 }
356
// Make *hw_frames_ctx a valid AVHWFramesContext with the given parameters,
// reusing the existing context if it already matches them exactly.
// On invalid parameters or any failure, *hw_frames_ctx ends up NULL.
// Returns true if *hw_frames_ctx is valid on return.
bool mp_update_av_hw_frames_pool(struct AVBufferRef **hw_frames_ctx,
                                 struct AVBufferRef *hw_device_ctx,
                                 int imgfmt, int sw_imgfmt, int w, int h)
{
    enum AVPixelFormat format = imgfmt2pixfmt(imgfmt);
    enum AVPixelFormat sw_format = imgfmt2pixfmt(sw_imgfmt);

    // Reject unmappable formats, a missing device, and bogus sizes; also
    // drop any previously created context in that case.
    if (format == AV_PIX_FMT_NONE || sw_format == AV_PIX_FMT_NONE ||
        !hw_device_ctx || w < 1 || h < 1)
    {
        av_buffer_unref(hw_frames_ctx);
        return false;
    }

    if (*hw_frames_ctx) {
        AVHWFramesContext *hw_frames = (void *)(*hw_frames_ctx)->data;

        // If any parameter differs from the existing context, discard it so
        // it gets recreated below. (Device identity is compared by the
        // underlying context data, not by the AVBufferRef wrapper.)
        if (hw_frames->device_ref->data != hw_device_ctx->data ||
            hw_frames->format != format || hw_frames->sw_format != sw_format ||
            hw_frames->width != w || hw_frames->height != h)
            av_buffer_unref(hw_frames_ctx);
    }

    if (!*hw_frames_ctx) {
        *hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
        if (!*hw_frames_ctx)
            return false;

        AVHWFramesContext *hw_frames = (void *)(*hw_frames_ctx)->data;
        hw_frames->format = format;
        hw_frames->sw_format = sw_format;
        hw_frames->width = w;
        hw_frames->height = h;
        if (av_hwframe_ctx_init(*hw_frames_ctx) < 0) {
            av_buffer_unref(hw_frames_ctx);
            return false;
        }
    }

    return true;
}
398
mp_av_pool_image_hw_upload(struct AVBufferRef * hw_frames_ctx,struct mp_image * src)399 struct mp_image *mp_av_pool_image_hw_upload(struct AVBufferRef *hw_frames_ctx,
400 struct mp_image *src)
401 {
402 AVFrame *av_frame = av_frame_alloc();
403 if (!av_frame)
404 return NULL;
405 if (av_hwframe_get_buffer(hw_frames_ctx, av_frame, 0) < 0) {
406 av_frame_free(&av_frame);
407 return NULL;
408 }
409 struct mp_image *dst = mp_image_from_av_frame(av_frame);
410 av_frame_free(&av_frame);
411 if (!dst)
412 return NULL;
413
414 if (dst->w < src->w || dst->h < src->h) {
415 talloc_free(dst);
416 return NULL;
417 }
418
419 mp_image_set_size(dst, src->w, src->h);
420
421 if (!mp_image_hw_upload(dst, src)) {
422 talloc_free(dst);
423 return NULL;
424 }
425
426 mp_image_copy_attributes(dst, src);
427 return dst;
428 }
429