/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "av1/common/cfl.h"
#include "av1/common/common_data.h"
#include "av1/common/onyxc_int.h"
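
// Note: cfl->pred_buf_q3 holds the (possibly subsampled) reconstructed luma
// pixels in Q3 fixed point (3 fractional bits), as produced by the
// cfl_luma_subsampling_* helpers below.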

void cfl_init(CFL_CTX *cfl, AV1_COMMON *cm) {
  if (!((cm->subsampling_x == 0 && cm->subsampling_y == 0) ||
        (cm->subsampling_x == 1 && cm->subsampling_y == 1))) {
    aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                       "Only 4:4:4 and 4:2:0 are currently supported by CfL");
  }
  memset(&cfl->pred_buf_q3, 0, sizeof(cfl->pred_buf_q3));
  cfl->subsampling_x = cm->subsampling_x;
  cfl->subsampling_y = cm->subsampling_y;
  cfl->are_parameters_computed = 0;
  cfl->store_y = 0;
#if CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
  cfl_clear_sub8x8_val(cfl);
#endif  // CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
}

// Due to frame boundary issues, it is possible that the total area covered by
// chroma exceeds that of luma. When this happens, we fill the missing pixels by
// repeating the last columns and/or rows.
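// Columns are padded first, then rows; the row pass copies the row above and
// therefore also fills the bottom-right corner that is missing in both
// dimensions.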
static INLINE void cfl_pad(CFL_CTX *cfl, int width, int height) {
  const int diff_width = width - cfl->buf_width;
  const int diff_height = height - cfl->buf_height;

  if (diff_width > 0) {
    const int min_height = height - diff_height;
    int16_t *pred_buf_q3 = cfl->pred_buf_q3 + (width - diff_width);
    for (int j = 0; j < min_height; j++) {
      const int last_pixel = pred_buf_q3[-1];
      for (int i = 0; i < diff_width; i++) {
        pred_buf_q3[i] = last_pixel;
      }
      pred_buf_q3 += MAX_SB_SIZE;
    }
    cfl->buf_width = width;
  }
  if (diff_height > 0) {
    int16_t *pred_buf_q3 =
        cfl->pred_buf_q3 + ((height - diff_height) * MAX_SB_SIZE);
    for (int j = 0; j < diff_height; j++) {
      const int16_t *last_row_q3 = pred_buf_q3 - MAX_SB_SIZE;
      for (int i = 0; i < width; i++) {
        pred_buf_q3[i] = last_row_q3[i];
      }
      pred_buf_q3 += MAX_SB_SIZE;
    }
    cfl->buf_height = height;
  }
}

static void sum_above_row_lbd(const uint8_t *above_u, const uint8_t *above_v,
                              int width, int *out_sum_u, int *out_sum_v) {
  int sum_u = 0;
  int sum_v = 0;
  for (int i = 0; i < width; i++) {
    sum_u += above_u[i];
    sum_v += above_v[i];
  }
  *out_sum_u += sum_u;
  *out_sum_v += sum_v;
}
#if CONFIG_HIGHBITDEPTH
static void sum_above_row_hbd(const uint16_t *above_u, const uint16_t *above_v,
                              int width, int *out_sum_u, int *out_sum_v) {
  int sum_u = 0;
  int sum_v = 0;
  for (int i = 0; i < width; i++) {
    sum_u += above_u[i];
    sum_v += above_v[i];
  }
  *out_sum_u += sum_u;
  *out_sum_v += sum_v;
}
#endif  // CONFIG_HIGHBITDEPTH

static void sum_above_row(const MACROBLOCKD *xd, int width, int *out_sum_u,
                          int *out_sum_v) {
  const struct macroblockd_plane *const pd_u = &xd->plane[AOM_PLANE_U];
  const struct macroblockd_plane *const pd_v = &xd->plane[AOM_PLANE_V];
#if CONFIG_HIGHBITDEPTH
  if (get_bitdepth_data_path_index(xd)) {
    const uint16_t *above_u_16 =
        CONVERT_TO_SHORTPTR(pd_u->dst.buf) - pd_u->dst.stride;
    const uint16_t *above_v_16 =
        CONVERT_TO_SHORTPTR(pd_v->dst.buf) - pd_v->dst.stride;
    sum_above_row_hbd(above_u_16, above_v_16, width, out_sum_u, out_sum_v);
    return;
  }
#endif  // CONFIG_HIGHBITDEPTH
  const uint8_t *above_u = pd_u->dst.buf - pd_u->dst.stride;
  const uint8_t *above_v = pd_v->dst.buf - pd_v->dst.stride;
  sum_above_row_lbd(above_u, above_v, width, out_sum_u, out_sum_v);
}

static void sum_left_col_lbd(const uint8_t *left_u, int u_stride,
                             const uint8_t *left_v, int v_stride, int height,
                             int *out_sum_u, int *out_sum_v) {
  int sum_u = 0;
  int sum_v = 0;
  for (int i = 0; i < height; i++) {
    sum_u += left_u[i * u_stride];
    sum_v += left_v[i * v_stride];
  }
  *out_sum_u += sum_u;
  *out_sum_v += sum_v;
}
#if CONFIG_HIGHBITDEPTH
static void sum_left_col_hbd(const uint16_t *left_u, int u_stride,
                             const uint16_t *left_v, int v_stride, int height,
                             int *out_sum_u, int *out_sum_v) {
  int sum_u = 0;
  int sum_v = 0;
  for (int i = 0; i < height; i++) {
    sum_u += left_u[i * u_stride];
    sum_v += left_v[i * v_stride];
  }
  *out_sum_u += sum_u;
  *out_sum_v += sum_v;
}
#endif  // CONFIG_HIGHBITDEPTH
static void sum_left_col(const MACROBLOCKD *xd, int height, int *out_sum_u,
                         int *out_sum_v) {
  const struct macroblockd_plane *const pd_u = &xd->plane[AOM_PLANE_U];
  const struct macroblockd_plane *const pd_v = &xd->plane[AOM_PLANE_V];

#if CONFIG_HIGHBITDEPTH
  if (get_bitdepth_data_path_index(xd)) {
    const uint16_t *left_u_16 = CONVERT_TO_SHORTPTR(pd_u->dst.buf) - 1;
    const uint16_t *left_v_16 = CONVERT_TO_SHORTPTR(pd_v->dst.buf) - 1;
    sum_left_col_hbd(left_u_16, pd_u->dst.stride, left_v_16, pd_v->dst.stride,
                     height, out_sum_u, out_sum_v);
    return;
  }
#endif  // CONFIG_HIGHBITDEPTH
  const uint8_t *left_u = pd_u->dst.buf - 1;
  const uint8_t *left_v = pd_v->dst.buf - 1;
  sum_left_col_lbd(left_u, pd_u->dst.stride, left_v, pd_v->dst.stride, height,
                   out_sum_u, out_sum_v);
}

// CfL computes its own block-level DC_PRED. This is required to compute both
// alpha_cb and alpha_cr before the predictions are computed.
static void cfl_dc_pred(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize) {
  CFL_CTX *const cfl = xd->cfl;

  // Compute DC_PRED up to the block boundary. We can't assume the neighbor
  // will use the same transform size.
  const int width = max_block_wide(xd, plane_bsize, AOM_PLANE_U)
                    << tx_size_wide_log2[0];
  const int height = max_block_high(xd, plane_bsize, AOM_PLANE_U)
                     << tx_size_high_log2[0];
  // Number of pixels on the top and left borders.
  const int num_pel = width + height;

  int sum_u = 0;
  int sum_v = 0;

// Match behavior of build_intra_predictors_high (reconintra.c) at superblock
// boundaries:
// base-1 base-1 base-1 .. base-1 base-1 base-1 base-1 base-1 base-1
// base+1   A      B  ..     Y      Z
// base+1   C      D  ..     W      X
// base+1   E      F  ..     U      V
// base+1   G      H  ..     S      T      T      T      T      T
// ..
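// When the above row is unavailable, each of the width missing pixels
// contributes (base - 1) to the sum; when the left column is unavailable, each
// of the height missing pixels contributes (base + 1), as in the diagram.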

#if CONFIG_CHROMA_SUB8X8
  if (xd->chroma_up_available && xd->mb_to_right_edge >= 0) {
#else
  if (xd->up_available && xd->mb_to_right_edge >= 0) {
#endif
    sum_above_row(xd, width, &sum_u, &sum_v);
  } else {
    const int base = 128 << (xd->bd - 8);
    sum_u = width * (base - 1);
    sum_v = width * (base - 1);
  }

#if CONFIG_CHROMA_SUB8X8
  if (xd->chroma_left_available && xd->mb_to_bottom_edge >= 0) {
#else
  if (xd->left_available && xd->mb_to_bottom_edge >= 0) {
#endif
    sum_left_col(xd, height, &sum_u, &sum_v);
  } else {
    const int base = 128 << (xd->bd - 8);
    sum_u += height * (base + 1);
    sum_v += height * (base + 1);
  }

  // TODO(ltrudeau) Because of max_block_wide and max_block_high, num_pel will
  // not be a power of two. So these divisions will have to use a lookup table.
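  // For example, a 16x8 chroma block sums 16 + 8 = 24 border pixels, and 24 is
  // not a power of two, so the rounded division below cannot be a plain shift.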
  cfl->dc_pred[CFL_PRED_U] = (sum_u + (num_pel >> 1)) / num_pel;
  cfl->dc_pred[CFL_PRED_V] = (sum_v + (num_pel >> 1)) / num_pel;
}

static void cfl_subtract_averages(CFL_CTX *cfl, TX_SIZE tx_size) {
  const int width = cfl->uv_width;
  const int height = cfl->uv_height;
  const int tx_height = tx_size_high[tx_size];
  const int tx_width = tx_size_wide[tx_size];
  const int block_row_stride = MAX_SB_SIZE << tx_size_high_log2[tx_size];
  const int num_pel_log2 =
      (tx_size_high_log2[tx_size] + tx_size_wide_log2[tx_size]);
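  // num_pel_log2 is log2 of the number of pixels in one transform block, so
  // the per-block average below reduces to a rounded right shift.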

  int16_t *pred_buf_q3 = cfl->pred_buf_q3;

  cfl_pad(cfl, width, height);

  for (int b_j = 0; b_j < height; b_j += tx_height) {
    for (int b_i = 0; b_i < width; b_i += tx_width) {
      int sum_q3 = 0;
      int16_t *tx_pred_buf_q3 = pred_buf_q3;
      for (int t_j = 0; t_j < tx_height; t_j++) {
        for (int t_i = b_i; t_i < b_i + tx_width; t_i++) {
          sum_q3 += tx_pred_buf_q3[t_i];
        }
        tx_pred_buf_q3 += MAX_SB_SIZE;
      }
      int avg_q3 = (sum_q3 + (1 << (num_pel_log2 - 1))) >> num_pel_log2;
      // Loss is never more than 1/2 (in Q3)
      assert(fabs((double)avg_q3 - (sum_q3 / ((double)(1 << num_pel_log2)))) <=
             0.5);

      tx_pred_buf_q3 = pred_buf_q3;
      for (int t_j = 0; t_j < tx_height; t_j++) {
        for (int t_i = b_i; t_i < b_i + tx_width; t_i++) {
          tx_pred_buf_q3[t_i] -= avg_q3;
        }

        tx_pred_buf_q3 += MAX_SB_SIZE;
      }
    }
    pred_buf_q3 += block_row_stride;
  }
}

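// Maps the transmitted (alpha_idx, joint_sign) pair back to a signed alpha in
// Q3. For example, a positive sign with abs_alpha_q3 == 0 yields +1, i.e. an
// effective scaling factor of 1/8; a zero sign always yields 0.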
static INLINE int cfl_idx_to_alpha(int alpha_idx, int joint_sign,
                                   CFL_PRED_TYPE pred_type) {
  const int alpha_sign = (pred_type == CFL_PRED_U) ? CFL_SIGN_U(joint_sign)
                                                   : CFL_SIGN_V(joint_sign);
  if (alpha_sign == CFL_SIGN_ZERO) return 0;
  const int abs_alpha_q3 =
      (pred_type == CFL_PRED_U) ? CFL_IDX_U(alpha_idx) : CFL_IDX_V(alpha_idx);
  return (alpha_sign == CFL_SIGN_POS) ? abs_alpha_q3 + 1 : -abs_alpha_q3 - 1;
}

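// Builds the CfL prediction: chroma = DC_PRED + alpha * luma_ac. Both alpha_q3
// and pred_buf_q3[i] are in Q3, so their product is in Q6; get_scaled_luma_q0()
// brings it back to integer (Q0) precision before the DC prediction is added
// and the result is clipped to the pixel range.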
static void cfl_build_prediction_lbd(const int16_t *pred_buf_q3, uint8_t *dst,
                                     int dst_stride, int width, int height,
                                     int alpha_q3, int dc_pred) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      dst[i] =
          clip_pixel(get_scaled_luma_q0(alpha_q3, pred_buf_q3[i]) + dc_pred);
    }
    dst += dst_stride;
    pred_buf_q3 += MAX_SB_SIZE;
  }
}

#if CONFIG_HIGHBITDEPTH
static void cfl_build_prediction_hbd(const int16_t *pred_buf_q3, uint16_t *dst,
                                     int dst_stride, int width, int height,
                                     int alpha_q3, int dc_pred, int bit_depth) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      dst[i] = clip_pixel_highbd(
          get_scaled_luma_q0(alpha_q3, pred_buf_q3[i]) + dc_pred, bit_depth);
    }
    dst += dst_stride;
    pred_buf_q3 += MAX_SB_SIZE;
  }
}
#endif  // CONFIG_HIGHBITDEPTH

static void cfl_build_prediction(const int16_t *pred_buf_q3, uint8_t *dst,
                                 int dst_stride, int width, int height,
                                 int alpha_q3, int dc_pred, int use_hbd,
                                 int bit_depth) {
#if CONFIG_HIGHBITDEPTH
  if (use_hbd) {
    uint16_t *dst_16 = CONVERT_TO_SHORTPTR(dst);
    cfl_build_prediction_hbd(pred_buf_q3, dst_16, dst_stride, width, height,
                             alpha_q3, dc_pred, bit_depth);
    return;
  }
#endif  // CONFIG_HIGHBITDEPTH
  (void)use_hbd;
  (void)bit_depth;
  cfl_build_prediction_lbd(pred_buf_q3, dst, dst_stride, width, height,
                           alpha_q3, dc_pred);
}

void cfl_predict_block(MACROBLOCKD *const xd, uint8_t *dst, int dst_stride,
                       int row, int col, TX_SIZE tx_size, int plane) {
  CFL_CTX *const cfl = xd->cfl;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;

  // CfL parameters must be computed before prediction can be done.
  assert(cfl->are_parameters_computed == 1);

  const int16_t *pred_buf_q3 =
      cfl->pred_buf_q3 + ((row * MAX_SB_SIZE + col) << tx_size_wide_log2[0]);
  const int alpha_q3 =
      cfl_idx_to_alpha(mbmi->cfl_alpha_idx, mbmi->cfl_alpha_signs, plane - 1);

  cfl_build_prediction(pred_buf_q3, dst, dst_stride, tx_size_wide[tx_size],
                       tx_size_high[tx_size], alpha_q3, cfl->dc_pred[plane - 1],
                       get_bitdepth_data_path_index(xd), xd->bd);
}

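// 4:2:0 subsampling: each output sample is the average of a 2x2 block of luma
// pixels, kept in Q3. The sum of the four pixels is 4x their average, so
// shifting it left by 1 gives the average scaled by 8 (i.e. the average in Q3).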
static void cfl_luma_subsampling_420_lbd(const uint8_t *input, int input_stride,
                                         int16_t *output_q3, int width,
                                         int height) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      int top = i << 1;
      int bot = top + input_stride;
      output_q3[i] = (input[top] + input[top + 1] + input[bot] + input[bot + 1])
                     << 1;
    }
    input += input_stride << 1;
    output_q3 += MAX_SB_SIZE;
  }
}

static void cfl_luma_subsampling_444_lbd(const uint8_t *input, int input_stride,
                                         int16_t *output_q3, int width,
                                         int height) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      output_q3[i] = input[i] << 3;
    }
    input += input_stride;
    output_q3 += MAX_SB_SIZE;
  }
}

#if CONFIG_HIGHBITDEPTH
static void cfl_luma_subsampling_420_hbd(const uint16_t *input,
                                         int input_stride, int16_t *output_q3,
                                         int width, int height) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      int top = i << 1;
      int bot = top + input_stride;
      output_q3[i] = (input[top] + input[top + 1] + input[bot] + input[bot + 1])
                     << 1;
    }
    input += input_stride << 1;
    output_q3 += MAX_SB_SIZE;
  }
}

static void cfl_luma_subsampling_444_hbd(const uint16_t *input,
                                         int input_stride, int16_t *output_q3,
                                         int width, int height) {
  for (int j = 0; j < height; j++) {
    for (int i = 0; i < width; i++) {
      output_q3[i] = input[i] << 3;
    }
    input += input_stride;
    output_q3 += MAX_SB_SIZE;
  }
}
#endif  // CONFIG_HIGHBITDEPTH

static void cfl_luma_subsampling_420(const uint8_t *input, int input_stride,
                                     int16_t *output_q3, int width, int height,
                                     int use_hbd) {
#if CONFIG_HIGHBITDEPTH
  if (use_hbd) {
    const uint16_t *input_16 = CONVERT_TO_SHORTPTR(input);
    cfl_luma_subsampling_420_hbd(input_16, input_stride, output_q3, width,
                                 height);
    return;
  }
#endif  // CONFIG_HIGHBITDEPTH
  (void)use_hbd;
  cfl_luma_subsampling_420_lbd(input, input_stride, output_q3, width, height);
}

static void cfl_luma_subsampling_444(const uint8_t *input, int input_stride,
                                     int16_t *output_q3, int width, int height,
                                     int use_hbd) {
#if CONFIG_HIGHBITDEPTH
  if (use_hbd) {
    const uint16_t *input_16 = CONVERT_TO_SHORTPTR(input);
    cfl_luma_subsampling_444_hbd(input_16, input_stride, output_q3, width,
                                 height);
    return;
  }
#endif  // CONFIG_HIGHBITDEPTH
  (void)use_hbd;
  cfl_luma_subsampling_444_lbd(input, input_stride, output_q3, width, height);
}

static INLINE void cfl_store(CFL_CTX *cfl, const uint8_t *input,
                             int input_stride, int row, int col, int width,
                             int height, int use_hbd) {
  const int tx_off_log2 = tx_size_wide_log2[0];
  const int sub_x = cfl->subsampling_x;
  const int sub_y = cfl->subsampling_y;
  const int store_row = row << (tx_off_log2 - sub_y);
  const int store_col = col << (tx_off_log2 - sub_x);
  const int store_height = height >> sub_y;
  const int store_width = width >> sub_x;
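  // row and col are given in units of the smallest (4x4) transform block, so
  // shifting by (tx_off_log2 - sub_x/y) converts them into sample offsets
  // within the subsampled prediction buffer.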

  // Invalidate current parameters
  cfl->are_parameters_computed = 0;

  // Store the extent of the pixel buffer that was written to; this lets us
  // manage chroma overrun (e.g. when the chroma surface goes beyond the frame
  // boundary).
  if (col == 0 && row == 0) {
    cfl->buf_width = store_width;
    cfl->buf_height = store_height;
  } else {
    cfl->buf_width = OD_MAXI(store_col + store_width, cfl->buf_width);
    cfl->buf_height = OD_MAXI(store_row + store_height, cfl->buf_height);
  }

  // Check that we will remain inside the pixel buffer.
  assert(store_row + store_height <= MAX_SB_SIZE);
  assert(store_col + store_width <= MAX_SB_SIZE);

  // Store the input into the CfL pixel buffer
  int16_t *pred_buf_q3 =
      cfl->pred_buf_q3 + (store_row * MAX_SB_SIZE + store_col);

  if (sub_y == 0 && sub_x == 0) {
    cfl_luma_subsampling_444(input, input_stride, pred_buf_q3, store_width,
                             store_height, use_hbd);
  } else if (sub_y == 1 && sub_x == 1) {
    cfl_luma_subsampling_420(input, input_stride, pred_buf_q3, store_width,
                             store_height, use_hbd);
  } else {
    // TODO(ltrudeau) add support for 4:2:2
    assert(0);  // Unsupported chroma subsampling
  }
}

#if CONFIG_CHROMA_SUB8X8
// Adjust the row and column of blocks smaller than 8X8, as chroma-referenced
// and non-chroma-referenced blocks are stored together in the CfL buffer.
static INLINE void sub8x8_adjust_offset(const CFL_CTX *cfl, int *row_out,
                                        int *col_out) {
  // Increment row index for bottom: 8x4, 16x4 or both bottom 4x4s.
  if ((cfl->mi_row & 0x01) && cfl->subsampling_y) {
    assert(*row_out == 0);
    (*row_out)++;
  }

  // Increment col index for right: 4x8, 4x16 or both right 4x4s.
  if ((cfl->mi_col & 0x01) && cfl->subsampling_x) {
    assert(*col_out == 0);
    (*col_out)++;
  }
}
#if CONFIG_DEBUG
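// Debug bookkeeping: marks every 4x4 unit covered by this sub-8x8 store. Each
// unit must be written exactly once; cfl_compute_parameters() asserts this and
// then clears the map.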
static INLINE void sub8x8_set_val(CFL_CTX *cfl, int row, int col, int val_high,
                                  int val_wide) {
  for (int val_r = 0; val_r < val_high; val_r++) {
    assert(row + val_r < CFL_SUB8X8_VAL_MI_SIZE);
    int row_off = (row + val_r) * CFL_SUB8X8_VAL_MI_SIZE;
    for (int val_c = 0; val_c < val_wide; val_c++) {
      assert(col + val_c < CFL_SUB8X8_VAL_MI_SIZE);
      assert(cfl->sub8x8_val[row_off + col + val_c] == 0);
      cfl->sub8x8_val[row_off + col + val_c]++;
    }
  }
}
#endif  // CONFIG_DEBUG
#endif  // CONFIG_CHROMA_SUB8X8

void cfl_store_tx(MACROBLOCKD *const xd, int row, int col, TX_SIZE tx_size,
                  BLOCK_SIZE bsize) {
  CFL_CTX *const cfl = xd->cfl;
  struct macroblockd_plane *const pd = &xd->plane[AOM_PLANE_Y];
  uint8_t *dst =
      &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
  (void)bsize;
#if CONFIG_CHROMA_SUB8X8

  if (block_size_high[bsize] == 4 || block_size_wide[bsize] == 4) {
    // Only dimensions of size 4 can have an odd offset.
    assert(!((col & 1) && tx_size_wide[tx_size] != 4));
    assert(!((row & 1) && tx_size_high[tx_size] != 4));
    sub8x8_adjust_offset(cfl, &row, &col);
#if CONFIG_DEBUG
    sub8x8_set_val(cfl, row, col, tx_size_high_unit[tx_size],
                   tx_size_wide_unit[tx_size]);
#endif  // CONFIG_DEBUG
  }
#endif
  cfl_store(cfl, dst, pd->dst.stride, row, col, tx_size_wide[tx_size],
            tx_size_high[tx_size], get_bitdepth_data_path_index(xd));
}

void cfl_store_block(MACROBLOCKD *const xd, BLOCK_SIZE bsize, TX_SIZE tx_size) {
  CFL_CTX *const cfl = xd->cfl;
  struct macroblockd_plane *const pd = &xd->plane[AOM_PLANE_Y];
  int row = 0;
  int col = 0;
#if CONFIG_CHROMA_SUB8X8
  bsize = AOMMAX(BLOCK_4X4, bsize);
  if (block_size_high[bsize] == 4 || block_size_wide[bsize] == 4) {
    sub8x8_adjust_offset(cfl, &row, &col);
#if CONFIG_DEBUG
    sub8x8_set_val(cfl, row, col, mi_size_high[bsize], mi_size_wide[bsize]);
#endif  // CONFIG_DEBUG
  }
#endif  // CONFIG_CHROMA_SUB8X8
  const int width = max_intra_block_width(xd, bsize, AOM_PLANE_Y, tx_size);
  const int height = max_intra_block_height(xd, bsize, AOM_PLANE_Y, tx_size);
  cfl_store(cfl, pd->dst.buf, pd->dst.stride, row, col, width, height,
            get_bitdepth_data_path_index(xd));
}

void cfl_compute_parameters(MACROBLOCKD *const xd, TX_SIZE tx_size) {
  CFL_CTX *const cfl = xd->cfl;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;

  // Do not call cfl_compute_parameters multiple times on the same values.
  assert(cfl->are_parameters_computed == 0);

#if CONFIG_CHROMA_SUB8X8
  const BLOCK_SIZE plane_bsize = AOMMAX(
      BLOCK_4X4, get_plane_block_size(mbmi->sb_type, &xd->plane[AOM_PLANE_U]));
#if CONFIG_DEBUG
  if (mbmi->sb_type < BLOCK_8X8) {
    for (int val_r = 0; val_r < mi_size_high[mbmi->sb_type]; val_r++) {
      for (int val_c = 0; val_c < mi_size_wide[mbmi->sb_type]; val_c++) {
        assert(cfl->sub8x8_val[val_r * CFL_SUB8X8_VAL_MI_SIZE + val_c] == 1);
      }
    }
    cfl_clear_sub8x8_val(cfl);
  }
#endif  // CONFIG_DEBUG
#else
  const BLOCK_SIZE plane_bsize =
      get_plane_block_size(mbmi->sb_type, &xd->plane[AOM_PLANE_U]);
#endif
  // AOM_PLANE_U is used, but both planes will have the same sizes.
  cfl->uv_width = max_intra_block_width(xd, plane_bsize, AOM_PLANE_U, tx_size);
  cfl->uv_height =
      max_intra_block_height(xd, plane_bsize, AOM_PLANE_U, tx_size);

  assert(cfl->buf_width <= cfl->uv_width);
  assert(cfl->buf_height <= cfl->uv_height);

  cfl_dc_pred(xd, plane_bsize);
  cfl_subtract_averages(cfl, tx_size);
  cfl->are_parameters_computed = 1;
}