/*
 *
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
12
13 #include "config/aom_config.h"
14
15 #include "aom_mem/aom_mem.h"
16
17 #include "av1/common/alloccommon.h"
18 #include "av1/common/av1_common_int.h"
19 #include "av1/common/blockd.h"
20 #include "av1/common/entropymode.h"
21 #include "av1/common/entropymv.h"
22
av1_get_MBs(int width,int height)23 int av1_get_MBs(int width, int height) {
24 const int aligned_width = ALIGN_POWER_OF_TWO(width, 3);
25 const int aligned_height = ALIGN_POWER_OF_TWO(height, 3);
26 const int mi_cols = aligned_width >> MI_SIZE_LOG2;
27 const int mi_rows = aligned_height >> MI_SIZE_LOG2;
28
29 const int mb_cols = (mi_cols + 2) >> 2;
30 const int mb_rows = (mi_rows + 2) >> 2;
31 return mb_rows * mb_cols;
32 }
33
// Releases every slot in the frame-buffer pool: the externally-owned raw
// buffer (returned to the application via its release callback), the
// motion-vector and segmentation-map side data, and the internal buffer.
void av1_free_ref_frame_buffers(BufferPool *pool) {
  for (int i = 0; i < FRAME_BUFFERS; ++i) {
    // A raw buffer that is still referenced must be handed back to the
    // application before the slot is cleared.
    const int release_raw =
        pool->frame_bufs[i].ref_count > 0 &&
        pool->frame_bufs[i].raw_frame_buffer.data != NULL;
    if (release_raw) {
      pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
      pool->frame_bufs[i].raw_frame_buffer.data = NULL;
      pool->frame_bufs[i].raw_frame_buffer.size = 0;
      pool->frame_bufs[i].raw_frame_buffer.priv = NULL;
      pool->frame_bufs[i].ref_count = 0;
    }
    // Side data and the internal frame buffer are freed unconditionally;
    // freeing NULL is a no-op.
    aom_free(pool->frame_bufs[i].mvs);
    pool->frame_bufs[i].mvs = NULL;
    aom_free(pool->frame_bufs[i].seg_map);
    pool->frame_bufs[i].seg_map = NULL;
    aom_free_frame_buffer(&pool->frame_bufs[i].buf);
  }
}
53
54 #if !CONFIG_REALTIME_ONLY
// Allocates (or lazily re-allocates) all buffers needed by the loop
// restoration stage: per-plane restoration structs, the shared scratch
// buffers, and the stripe-boundary line buffers.
// Assumes cm->rst_info[p].restoration_unit_size is already initialized
void av1_alloc_restoration_buffers(AV1_COMMON *cm) {
  const int num_planes = av1_num_planes(cm);
  // Per-plane restoration structs; the flag distinguishes luma (p == 0)
  // from chroma planes.
  for (int p = 0; p < num_planes; ++p)
    av1_alloc_restoration_struct(cm, &cm->rst_info[p], p > 0);

  // Shared scratch buffers, allocated lazily on first use and kept for the
  // lifetime of cm. CHECK_MEM_ERROR aborts decoding on allocation failure.
  if (cm->rst_tmpbuf == NULL) {
    CHECK_MEM_ERROR(cm, cm->rst_tmpbuf,
                    (int32_t *)aom_memalign(16, RESTORATION_TMPBUF_SIZE));
  }

  if (cm->rlbs == NULL) {
    CHECK_MEM_ERROR(cm, cm->rlbs, aom_malloc(sizeof(RestorationLineBuffers)));
  }

  // For striped loop restoration, we divide each row of tiles into "stripes",
  // of height 64 luma pixels but with an offset by RESTORATION_UNIT_OFFSET
  // luma pixels to match the output from CDEF. We will need to store 2 *
  // RESTORATION_CTX_VERT lines of data for each stripe, and also need to be
  // able to quickly answer the question "Where is the <n>'th stripe for tile
  // row <m>?" To make that efficient, we generate the rst_last_stripe array.
  int num_stripes = 0;
  for (int i = 0; i < cm->tiles.rows; ++i) {
    TileInfo tile_info;
    av1_tile_set_row(&tile_info, cm, i);
    const int mi_h = tile_info.mi_row_end - tile_info.mi_row_start;
    // Extended tile height in luma pixels, then rounded up to whole
    // 64-pixel stripes.
    const int ext_h = RESTORATION_UNIT_OFFSET + (mi_h << MI_SIZE_LOG2);
    const int tile_stripes = (ext_h + 63) / 64;
    num_stripes += tile_stripes;
  }

  // Now we need to allocate enough space to store the line buffers for the
  // stripes
  const int frame_w = cm->superres_upscaled_width;
  const int use_highbd = cm->seq_params.use_highbitdepth;

  for (int p = 0; p < num_planes; ++p) {
    const int is_uv = p > 0;
    const int ss_x = is_uv && cm->seq_params.subsampling_x;
    // Plane width after horizontal subsampling (rounded up), plus padding on
    // both sides; stride is padded up to a multiple of 32.
    const int plane_w = ((frame_w + ss_x) >> ss_x) + 2 * RESTORATION_EXTRA_HORZ;
    const int stride = ALIGN_POWER_OF_TWO(plane_w, 5);
    // RESTORATION_CTX_VERT context rows per stripe; doubled again for high
    // bit depth (samples take two bytes).
    const int buf_size = num_stripes * stride * RESTORATION_CTX_VERT
                         << use_highbd;
    RestorationStripeBoundaries *boundaries = &cm->rst_info[p].boundaries;

    // Reallocate only when the required size changed or a buffer is missing;
    // otherwise the existing buffers are reused.
    if (buf_size != boundaries->stripe_boundary_size ||
        boundaries->stripe_boundary_above == NULL ||
        boundaries->stripe_boundary_below == NULL) {
      aom_free(boundaries->stripe_boundary_above);
      aom_free(boundaries->stripe_boundary_below);

      CHECK_MEM_ERROR(cm, boundaries->stripe_boundary_above,
                      (uint8_t *)aom_memalign(32, buf_size));
      CHECK_MEM_ERROR(cm, boundaries->stripe_boundary_below,
                      (uint8_t *)aom_memalign(32, buf_size));

      boundaries->stripe_boundary_size = buf_size;
    }
    boundaries->stripe_boundary_stride = stride;
  }
}
116
// Frees everything av1_alloc_restoration_buffers() may have allocated, for
// all planes (not just the ones currently in use).
void av1_free_restoration_buffers(AV1_COMMON *cm) {
  for (int p = 0; p < MAX_MB_PLANE; ++p) {
    av1_free_restoration_struct(&cm->rst_info[p]);
    // Stripe boundary line buffers for this plane.
    RestorationStripeBoundaries *const b = &cm->rst_info[p].boundaries;
    aom_free(b->stripe_boundary_above);
    b->stripe_boundary_above = NULL;
    aom_free(b->stripe_boundary_below);
    b->stripe_boundary_below = NULL;
  }
  // Shared scratch buffers.
  aom_free(cm->rst_tmpbuf);
  cm->rst_tmpbuf = NULL;
  aom_free(cm->rlbs);
  cm->rlbs = NULL;

  aom_free_frame_buffer(&cm->rst_frame);
}
135 #endif // !CONFIG_REALTIME_ONLY
136
// Frees the above-context arrays (entropy, partition, txfm) and resets the
// bookkeeping fields so a subsequent alloc starts from scratch.
void av1_free_above_context_buffers(CommonContexts *above_contexts) {
  const int num_planes = above_contexts->num_planes;

  // First release the per-tile-row context arrays ...
  for (int tr = 0; tr < above_contexts->num_tile_rows; ++tr) {
    for (int p = 0; p < num_planes; ++p) {
      aom_free(above_contexts->entropy[p][tr]);
      above_contexts->entropy[p][tr] = NULL;
    }
    aom_free(above_contexts->partition[tr]);
    above_contexts->partition[tr] = NULL;

    aom_free(above_contexts->txfm[tr]);
    above_contexts->txfm[tr] = NULL;
  }
  // ... then the arrays of row pointers themselves.
  for (int p = 0; p < num_planes; ++p) {
    aom_free(above_contexts->entropy[p]);
    above_contexts->entropy[p] = NULL;
  }
  aom_free(above_contexts->partition);
  above_contexts->partition = NULL;

  aom_free(above_contexts->txfm);
  above_contexts->txfm = NULL;

  // Mark the struct empty.
  above_contexts->num_tile_rows = 0;
  above_contexts->num_mi_cols = 0;
  above_contexts->num_planes = 0;
}
166
// Frees the mode-info arrays, the above-context buffers and (when built with
// CONFIG_LPF_MASK) the loop-filter bit masks.
void av1_free_context_buffers(AV1_COMMON *cm) {
  // Releases mi_alloc / mi_grid_base / tx_type_map via the function pointer
  // installed on mi_params.
  cm->mi_params.free_mi(&cm->mi_params);

  av1_free_above_context_buffers(&cm->above_contexts);

#if CONFIG_LPF_MASK
  av1_free_loop_filter_mask(cm);
#endif
}
176
// Allocates the above-context arrays (entropy, partition, txfm) for
// num_tile_rows tile rows, num_mi_cols mode-info columns (rounded up to a
// superblock multiple) and num_planes planes.
//
// Returns 0 on success, 1 on allocation failure. On failure the struct may be
// partially allocated; the caller is expected to clean up with
// av1_free_above_context_buffers(), which handles partial state.
//
// Fix: the aom_calloc element sizes previously used one level of indirection
// too few (e.g. sizeof(above_contexts->partition), a pointer-to-pointer,
// where the stored elements are single pointers). All object pointers have
// the same size on supported platforms, so behavior is unchanged, but the
// sizeof(*ptr) idiom below now matches the actual element type.
int av1_alloc_above_context_buffers(CommonContexts *above_contexts,
                                    int num_tile_rows, int num_mi_cols,
                                    int num_planes) {
  const int aligned_mi_cols =
      ALIGN_POWER_OF_TWO(num_mi_cols, MAX_MIB_SIZE_LOG2);

  // Record the geometry before allocating so a failure path still knows what
  // to free.
  above_contexts->num_tile_rows = num_tile_rows;
  above_contexts->num_mi_cols = aligned_mi_cols;
  above_contexts->num_planes = num_planes;

  // Arrays of per-tile-row pointers, one per plane for the entropy contexts.
  for (int plane_idx = 0; plane_idx < num_planes; plane_idx++) {
    above_contexts->entropy[plane_idx] = (ENTROPY_CONTEXT **)aom_calloc(
        num_tile_rows, sizeof(*above_contexts->entropy[plane_idx]));
    if (!above_contexts->entropy[plane_idx]) return 1;
  }

  above_contexts->partition = (PARTITION_CONTEXT **)aom_calloc(
      num_tile_rows, sizeof(*above_contexts->partition));
  if (!above_contexts->partition) return 1;

  above_contexts->txfm =
      (TXFM_CONTEXT **)aom_calloc(num_tile_rows, sizeof(*above_contexts->txfm));
  if (!above_contexts->txfm) return 1;

  // Per-tile-row context arrays, one entry per (aligned) mi column.
  for (int tile_row = 0; tile_row < num_tile_rows; tile_row++) {
    for (int plane_idx = 0; plane_idx < num_planes; plane_idx++) {
      above_contexts->entropy[plane_idx][tile_row] =
          (ENTROPY_CONTEXT *)aom_calloc(
              aligned_mi_cols, sizeof(*above_contexts->entropy[0][tile_row]));
      if (!above_contexts->entropy[plane_idx][tile_row]) return 1;
    }

    above_contexts->partition[tile_row] = (PARTITION_CONTEXT *)aom_calloc(
        aligned_mi_cols, sizeof(*above_contexts->partition[tile_row]));
    if (!above_contexts->partition[tile_row]) return 1;

    above_contexts->txfm[tile_row] = (TXFM_CONTEXT *)aom_calloc(
        aligned_mi_cols, sizeof(*above_contexts->txfm[tile_row]));
    if (!above_contexts->txfm[tile_row]) return 1;
  }

  return 0;
}
220
221 // Allocate the dynamically allocated arrays in 'mi_params' assuming
222 // 'mi_params->set_mb_mi()' was already called earlier to initialize the rest of
223 // the struct members.
alloc_mi(CommonModeInfoParams * mi_params)224 static int alloc_mi(CommonModeInfoParams *mi_params) {
225 const int aligned_mi_rows = calc_mi_size(mi_params->mi_rows);
226 const int mi_grid_size = mi_params->mi_stride * aligned_mi_rows;
227 const int alloc_size_1d = mi_size_wide[mi_params->mi_alloc_bsize];
228 const int alloc_mi_size =
229 mi_params->mi_alloc_stride * (aligned_mi_rows / alloc_size_1d);
230
231 if (mi_params->mi_alloc_size < alloc_mi_size ||
232 mi_params->mi_grid_size < mi_grid_size) {
233 mi_params->free_mi(mi_params);
234
235 mi_params->mi_alloc =
236 aom_calloc(alloc_mi_size, sizeof(*mi_params->mi_alloc));
237 if (!mi_params->mi_alloc) return 1;
238 mi_params->mi_alloc_size = alloc_mi_size;
239
240 mi_params->mi_grid_base = (MB_MODE_INFO **)aom_calloc(
241 mi_grid_size, sizeof(*mi_params->mi_grid_base));
242 if (!mi_params->mi_grid_base) return 1;
243 mi_params->mi_grid_size = mi_grid_size;
244
245 mi_params->tx_type_map =
246 aom_calloc(mi_grid_size, sizeof(*mi_params->tx_type_map));
247 if (!mi_params->tx_type_map) return 1;
248 }
249
250 return 0;
251 }
252
// Sizes the mode-info geometry for a width x height frame and allocates the
// mi arrays. Returns 0 on success, 1 on failure.
int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
  CommonModeInfoParams *const mi_params = &cm->mi_params;
  mi_params->set_mb_mi(mi_params, width, height);
  if (!alloc_mi(mi_params)) return 0;

  // Allocation failed: clear the mi_* values to force a realloc on resync,
  // then release anything that was partially allocated.
  mi_params->set_mb_mi(mi_params, 0, 0);
  av1_free_context_buffers(cm);
  return 1;
}
265
// Final teardown for a codec instance: releases the context buffers and the
// frame-context structs.
void av1_remove_common(AV1_COMMON *cm) {
  av1_free_context_buffers(cm);

  aom_free(cm->fc);
  cm->fc = NULL;
  aom_free(cm->default_frame_context);
  cm->default_frame_context = NULL;
}
274
// Initializes the mode-info buffers by dispatching to the setup_mi
// implementation installed on mi_params.
void av1_init_mi_buffers(CommonModeInfoParams *mi_params) {
  mi_params->setup_mi(mi_params);
}
278
279 #if CONFIG_LPF_MASK
// (Re)allocates the array of loop-filter bit masks for the current frame
// geometry. Returns 0 on success, 1 on allocation failure.
int av1_alloc_loop_filter_mask(AV1_COMMON *cm) {
  // Drop any previous allocation (freeing NULL is fine).
  aom_free(cm->lf.lfm);
  cm->lf.lfm = NULL;

  // Each lfm holds bit masks for all the 4x4 blocks in a max
  // 64x64 (128x128 for ext_partitions) region. The stride
  // and rows are rounded up / truncated to a multiple of 16
  // (32 for ext_partition).
  cm->lf.lfm_stride =
      (cm->mi_params.mi_cols + (MI_SIZE_64X64 - 1)) >> MIN_MIB_SIZE_LOG2;
  cm->lf.lfm_num =
      ((cm->mi_params.mi_rows + (MI_SIZE_64X64 - 1)) >> MIN_MIB_SIZE_LOG2) *
      cm->lf.lfm_stride;
  cm->lf.lfm =
      (LoopFilterMask *)aom_calloc(cm->lf.lfm_num, sizeof(*cm->lf.lfm));
  if (!cm->lf.lfm) return 1;

  // Note: aom_calloc() already zero-fills the array, so the explicit
  // av1_zero() pass over every element that used to follow here was
  // redundant and has been removed.
  return 0;
}
302
// Frees the loop-filter bit masks and resets the associated geometry fields.
void av1_free_loop_filter_mask(AV1_COMMON *cm) {
  // Skip the resets entirely when the masks were never allocated.
  if (cm->lf.lfm != NULL) {
    aom_free(cm->lf.lfm);
    cm->lf.lfm = NULL;
    cm->lf.lfm_num = 0;
    cm->lf.lfm_stride = 0;
  }
}
311 #endif
312