h264_loopfilter.c
/*
 * H.26L/H.264/AVC/JVT/14496-10/... loop filter
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 loop filter.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "internal.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"
#include "mathops.h"
#include "rectangle.h"

#include <assert.h>

/* Deblocking filter (p153) */
static const uint8_t alpha_table[52*3] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
     7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
    25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
    80, 90,101,113,127,144,162,182,203,226,
   255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
   255,255,255,255,255,255,255,255,255,255,255,255,255,
};
static const uint8_t beta_table[52*3] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
     3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
     8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
    13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
    18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
static const uint8_t tc0_table[52*3][4] = {
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};

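/* The tables above are padded to 52*3 entries so that they can be indexed
 * directly with "qp + a" / "qp + b", where a and b already fold in the slice
 * alpha/beta offsets, the bit-depth QP offset and a +52 bias (see
 * h264_filter_mb_fast_internal() and ff_h264_filter_mb() below). The padding
 * on both sides makes the usual clip of indexA/indexB to [0, 51] unnecessary. */
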
/* intra: 0 if this loopfilter call is guaranteed to be inter (bS < 4), 1 if it might be intra (bS == 4) */
static av_always_inline void filter_mb_edgev(uint8_t *pix, int stride,
                                             const int16_t bS[4],
                                             unsigned int qp, int a, int b,
                                             H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]];
        tc[1] = tc0_table[index_a][bS[1]];
        tc[2] = tc0_table[index_a][bS[2]];
        tc[3] = tc0_table[index_a][bS[3]];
        h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
    }
}

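/* Chroma edges use the same alpha/beta thresholds as luma, but the strength
 * passed to the DSP routines is tc0 + 1, as the spec requires for chroma. */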
static av_always_inline void filter_mb_edgecv(uint8_t *pix, int stride,
                                              const int16_t bS[4],
                                              unsigned int qp, int a, int b,
                                              H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]]+1;
        tc[1] = tc0_table[index_a][bS[1]]+1;
        tc[2] = tc0_table[index_a][bS[2]]+1;
        tc[3] = tc0_table[index_a][bS[3]]+1;
        h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
    }
}

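/* MBAFF variants of the edge filters: for the split left edge the caller
 * computes 8 boundary strengths and passes bsi as the stride into bS[], so
 * one call reads bS[0], bS[bsi], bS[2*bsi] and bS[3*bsi]. bsi is 1 when the
 * current macroblock pair is filtered per field and 2 when a frame macroblock
 * is filtered in two interleaved passes (see ff_h264_filter_mb() below). */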
static av_always_inline void filter_mb_mbaff_edgev(H264Context *h, uint8_t *pix,
                                                   int stride,
                                                   const int16_t bS[7], int bsi,
                                                   int qp, int a, int b,
                                                   int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0*bsi]];
        tc[1] = tc0_table[index_a][bS[1*bsi]];
        tc[2] = tc0_table[index_a][bS[2*bsi]];
        tc[3] = tc0_table[index_a][bS[3*bsi]];
        h->h264dsp.h264_h_loop_filter_luma_mbaff(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_mbaff_edgecv(H264Context *h,
                                                    uint8_t *pix, int stride,
                                                    const int16_t bS[7],
                                                    int bsi, int qp, int a,
                                                    int b, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0*bsi]] + 1;
        tc[1] = tc0_table[index_a][bS[1*bsi]] + 1;
        tc[2] = tc0_table[index_a][bS[2*bsi]] + 1;
        tc[3] = tc0_table[index_a][bS[3*bsi]] + 1;
        h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_edgeh(uint8_t *pix, int stride,
                                             const int16_t bS[4],
                                             unsigned int qp, int a, int b,
                                             H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]];
        tc[1] = tc0_table[index_a][bS[1]];
        tc[2] = tc0_table[index_a][bS[2]];
        tc[3] = tc0_table[index_a][bS[3]];
        h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void filter_mb_edgech(uint8_t *pix, int stride,
                                              const int16_t bS[4],
                                              unsigned int qp, int a, int b,
                                              H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]]+1;
        tc[1] = tc0_table[index_a][bS[1]]+1;
        tc[2] = tc0_table[index_a][bS[2]]+1;
        tc[3] = tc0_table[index_a][bS[3]]+1;
        h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
    }
}

static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
                                                          int mb_x, int mb_y,
                                                          uint8_t *img_y,
                                                          uint8_t *img_cb,
                                                          uint8_t *img_cr,
                                                          unsigned int linesize,
                                                          unsigned int uvlinesize,
                                                          int pixel_shift)
{
    int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    int mb_xy = h->mb_xy;
    int left_type = h->left_type[LTOP];
    int top_type  = h->top_type;

    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
    int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset;
    int b = 52 + h->slice_beta_offset - qp_bd_offset;

    int mb_type = h->cur_pic.mb_type[mb_xy];
    int qp      = h->cur_pic.qscale_table[mb_xy];
    int qp0     = h->cur_pic.qscale_table[mb_xy - 1];
    int qp1     = h->cur_pic.qscale_table[h->top_mb_xy];
    int qpc  = get_chroma_qp( h, 0, qp );
    int qpc0 = get_chroma_qp( h, 0, qp0 );
    int qpc1 = get_chroma_qp( h, 0, qp1 );
    qp0  = (qp + qp0 + 1) >> 1;
    qp1  = (qp + qp1 + 1) >> 1;
    qpc0 = (qpc + qpc0 + 1) >> 1;
    qpc1 = (qpc + qpc1 + 1) >> 1;

    if( IS_INTRA(mb_type) ) {
        static const int16_t bS4[4] = {4,4,4,4};
        static const int16_t bS3[4] = {3,3,3,3};
        const int16_t *bSH = FIELD_PICTURE(h) ? bS3 : bS4;
        if(left_type)
            filter_mb_edgev( &img_y[4*0<<pixel_shift], linesize, bS4, qp0, a, b, h, 1);
        if( IS_8x8DCT(mb_type) ) {
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
        } else {
            filter_mb_edgev( &img_y[4*1<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*3<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, a, b, h, 0);
        }
        if(chroma){
            if(chroma444){
                if(left_type){
                    filter_mb_edgev( &img_cb[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgev( &img_cr[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                }
                if( IS_8x8DCT(mb_type) ) {
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                    }
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                } else {
                    filter_mb_edgev( &img_cb[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                    }
                    filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                }
            }else if(chroma422){
                if(left_type){
                    filter_mb_edgecv(&img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv(&img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv(&img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv(&img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech(&img_cb[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech(&img_cr[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech(&img_cb[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }else{
                if(left_type){
                    filter_mb_edgecv( &img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv( &img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv( &img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv( &img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }
        }
        return;
    } else {
        LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
        int edges;
        if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 && !chroma444 ) {
            edges = 4;
            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
        } else {
            int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
            int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
            int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1;
            edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
            h->h264dsp.h264_loop_filter_strength(bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
                                                 h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE(h));
        }
        if( IS_INTRA(left_type) )
            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
        if( IS_INTRA(top_type) )
            AV_WN64A(bS[1][0], FIELD_PICTURE(h) ? 0x0003000300030003ULL : 0x0004000400040004ULL);

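/* FILTER(hv, dir, edge, intra): filter internal edge number 'edge' of the
 * current macroblock. 'hv' selects the vertical-edge (v) or horizontal-edge
 * (h) helper, 'dir' indexes the matching bS row (0 for vertical edges, 1 for
 * horizontal edges) and picks the neighbour-averaged QP for edge 0. With
 * 4:2:0 subsampling only the even luma edges coincide with chroma block
 * edges, hence the !(edge&1) test for subsampled chroma. */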
#define FILTER(hv,dir,edge,intra)\
    if(AV_RN64A(bS[dir][edge])) { \
        filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qp : qp##dir, a, b, h, intra );\
        if(chroma){\
            if(chroma444){\
                filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
            } else if(!(edge&1)) {\
                filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
            }\
        }\
    }
        if(left_type)
            FILTER(v,0,0,1);
        if( edges == 1 ) {
            if(top_type)
                FILTER(h,1,0,1);
        } else if( IS_8x8DCT(mb_type) ) {
            FILTER(v,0,2,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,2,0);
        } else {
            FILTER(v,0,1,0);
            FILTER(v,0,2,0);
            FILTER(v,0,3,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,1,0);
            FILTER(h,1,2,0);
            FILTER(h,1,3,0);
        }
#undef FILTER
    }
}

void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
    assert(!FRAME_MBAFF(h));
    if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
        ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
        return;
    }

#if CONFIG_SMALL
    h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, h->pixel_shift);
#else
    if(h->pixel_shift){
        h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 1);
    }else{
        h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 0);
    }
#endif
}
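
/* Outside of CONFIG_SMALL builds, h264_filter_mb_fast_internal() is called
 * twice with pixel_shift as a compile-time constant (0 for 8-bit, 1 for
 * high bit depth), so the av_always_inline body is specialized for each
 * case; CONFIG_SMALL trades that speed for code size. */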
435 
static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){
    int v;

    v= h->ref_cache[0][b_idx] != h->ref_cache[0][bn_idx];
    if(!v && h->ref_cache[0][b_idx]!=-1)
        v= h->mv_cache[0][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
           FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;

    if(h->list_count==2){
        if(!v)
            v = h->ref_cache[1][b_idx] != h->ref_cache[1][bn_idx] |
                h->mv_cache[1][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit;

        if(v){
            if(h->ref_cache[0][b_idx] != h->ref_cache[1][bn_idx] |
               h->ref_cache[1][b_idx] != h->ref_cache[0][bn_idx])
                return 1;
            return
                h->mv_cache[0][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit |
                h->mv_cache[1][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;
        }
    }

    return v;
}

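/* Filter all edges of one macroblock in one direction: dir == 0 handles the
 * vertical edges (against the left neighbour), dir == 1 the horizontal edges
 * (against the top neighbour). The boundary strength bS is computed per
 * 4-pixel edge segment, and the outer edge uses the averaged QP of the two
 * adjacent macroblocks. */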
static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int a, int b, int chroma, int dir) {
    int edge;
    int chroma_qp_avg[2];
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);
    const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy;
    const int mbm_type = dir == 0 ? h->left_type[LTOP] : h->top_type;

    // how often to recheck mv-based bS when iterating between edges
    static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1},
                                              {0,3,1,1,3,3,3,3}};
    const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7];
    const int edges = mask_edge== 3 && !(h->cbp&15) ? 1 : 4;

    // how often to recheck mv-based bS when iterating along each edge
    const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir));

    if(mbm_type && !first_vertical_edge_done){

        if (FRAME_MBAFF(h) && (dir == 1) && ((mb_y&1) == 0)
            && IS_INTERLACED(mbm_type&~mb_type)
           ) {
            // This is a special case in the standard where the filtering must
            // be done twice (once for each field) even if we are in a
            // frame macroblock.
            //
            unsigned int tmp_linesize   = 2 *   linesize;
            unsigned int tmp_uvlinesize = 2 * uvlinesize;
            int mbn_xy = mb_xy - 2 * h->mb_stride;
            int j;

            for(j=0; j<2; j++, mbn_xy += h->mb_stride){
                DECLARE_ALIGNED(8, int16_t, bS)[4];
                int qp;
                if (IS_INTRA(mb_type | h->cur_pic.mb_type[mbn_xy])) {
                    AV_WN64A(bS, 0x0003000300030003ULL);
                } else {
                    if (!CABAC(h) && IS_8x8DCT(h->cur_pic.mb_type[mbn_xy])) {
                        bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
                        bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
                        bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
                        bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+3]);
                    }else{
                        const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4;
                        int i;
                        for( i = 0; i < 4; i++ ) {
                            bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
                        }
                    }
                }
                // Do not use s->qscale as luma quantizer because it does not have
                // the same value in IPCM macroblocks.
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
                tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
                { int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
                filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    } else {
                        filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    }
                }
            }
        }else{
            DECLARE_ALIGNED(8, int16_t, bS)[4];
            int qp;

            if( IS_INTRA(mb_type|mbm_type)) {
                AV_WN64A(bS, 0x0003000300030003ULL);
                if (   (!IS_INTERLACED(mb_type|mbm_type))
                    || ((FRAME_MBAFF(h) || (h->picture_structure != PICT_FRAME)) && (dir == 0))
                )
                    AV_WN64A(bS, 0x0004000400040004ULL);
            } else {
                int i;
                int mv_done;

                if( dir && FRAME_MBAFF(h) && IS_INTERLACED(mb_type ^ mbm_type)) {
                    AV_WN64A(bS, 0x0001000100010001ULL);
                    mv_done = 1;
                }
                else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
                    int b_idx= 8 + 4;
                    int bn_idx= b_idx - (dir ? 8:1);

                    bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit);
                    mv_done = 1;
                }
                else
                    mv_done = 0;

                for( i = 0; i < 4; i++ ) {
                    int x = dir == 0 ? 0 : i;
                    int y = dir == 0 ? i : 0;
                    int b_idx= 8 + 4 + x + 8*y;
                    int bn_idx= b_idx - (dir ? 8:1);

                    if( h->non_zero_count_cache[b_idx] |
                        h->non_zero_count_cache[bn_idx] ) {
                        bS[i] = 2;
                    }
                    else if(!mv_done)
                    {
                        bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
                    }
                }
            }

            /* Filter edge */
            // Do not use s->qscale as luma quantizer because it does not have
            // the same value in IPCM macroblocks.
            if(bS[0]+bS[1]+bS[2]+bS[3]){
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
                tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                } else {
                    filter_mb_edgeh( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                }
            }
        }
    }

    /* Calculate bS */
    for( edge = 1; edge < edges; edge++ ) {
        DECLARE_ALIGNED(8, int16_t, bS)[4];
        int qp;
        const int deblock_edge = !IS_8x8DCT(mb_type & (edge<<24)); // (edge&1) && IS_8x8DCT(mb_type)

        if (!deblock_edge && (!chroma422 || dir == 0))
            continue;

        if( IS_INTRA(mb_type)) {
            AV_WN64A(bS, 0x0003000300030003ULL);
        } else {
            int i;
            int mv_done;

            if( edge & mask_edge ) {
                AV_ZERO64(bS);
                mv_done = 1;
            }
            else if( mask_par0 ) {
                int b_idx= 8 + 4 + edge * (dir ? 8:1);
                int bn_idx= b_idx - (dir ? 8:1);

                bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit);
                mv_done = 1;
            }
            else
                mv_done = 0;

            for( i = 0; i < 4; i++ ) {
                int x = dir == 0 ? edge : i;
                int y = dir == 0 ? i    : edge;
                int b_idx= 8 + 4 + x + 8*y;
                int bn_idx= b_idx - (dir ? 8:1);

                if( h->non_zero_count_cache[b_idx] |
                    h->non_zero_count_cache[bn_idx] ) {
                    bS[i] = 2;
                }
                else if(!mv_done)
                {
                    bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
                }
            }

            if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
                continue;
        }

        /* Filter edge */
        // Do not use s->qscale as luma quantizer because it does not have
        // the same value in IPCM macroblocks.
        qp = h->cur_pic.qscale_table[mb_xy];
        tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
        if( dir == 0 ) {
            filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
            if (chroma) {
                if (chroma444) {
                    filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                } else if( (edge&1) == 0 ) {
                    filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                }
            }
        } else {
            if (chroma422) {
                if (deblock_edge)
                    filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    filter_mb_edgech(&img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgech(&img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                }
            } else {
                filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                    } else if ((edge&1) == 0) {
                        filter_mb_edgech(&img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                    }
                }
            }
        }
    }
}

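/* Deblock one decoded macroblock with the full (non-fast) code path: handle
 * the special MBAFF left edge first when the current and left macroblock
 * pairs differ in interlacing, then filter the vertical (dir 0) and
 * horizontal (dir 1) edges via filter_mb_dir(). */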
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
    const int mb_xy= mb_x + mb_y*h->mb_stride;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
    int first_vertical_edge_done = 0;
    av_unused int dir;
    int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
    int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset;
    int b = 52 + h->slice_beta_offset - qp_bd_offset;

    if (FRAME_MBAFF(h)
        // and current and left pair do not have the same interlaced type
        && IS_INTERLACED(mb_type^h->left_type[LTOP])
        // and the left mb is available to us
        && h->left_type[LTOP]) {
        /* First vertical edge is different in MBAFF frames
         * There are 8 different bS to compute and 2 different Qp
         */
        DECLARE_ALIGNED(8, int16_t, bS)[8];
        int qp[2];
        int bqp[2];
        int rqp[2];
        int mb_qp, mbn0_qp, mbn1_qp;
        int i;
        first_vertical_edge_done = 1;

        if( IS_INTRA(mb_type) ) {
            AV_WN64A(&bS[0], 0x0004000400040004ULL);
            AV_WN64A(&bS[4], 0x0004000400040004ULL);
        } else {
            static const uint8_t offset[2][2][8]={
                {
                    {3+4*0, 3+4*0, 3+4*0, 3+4*0, 3+4*1, 3+4*1, 3+4*1, 3+4*1},
                    {3+4*2, 3+4*2, 3+4*2, 3+4*2, 3+4*3, 3+4*3, 3+4*3, 3+4*3},
                },{
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                }
            };
            const uint8_t *off= offset[MB_FIELD(h)][mb_y&1];
            for( i = 0; i < 8; i++ ) {
                int j= MB_FIELD(h) ? i>>2 : i&1;
                int mbn_xy  = h->left_mb_xy[LEFT(j)];
                int mbn_type= h->left_type[LEFT(j)];

                if( IS_INTRA( mbn_type ) )
                    bS[i] = 4;
                else{
                    bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] |
                         ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
                            (h->cbp_table[mbn_xy] & (((MB_FIELD(h) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                                                                 :
                            h->non_zero_count[mbn_xy][ off[i] ]));
                }
            }
        }

        mb_qp   = h->cur_pic.qscale_table[mb_xy];
        mbn0_qp = h->cur_pic.qscale_table[h->left_mb_xy[0]];
        mbn1_qp = h->cur_pic.qscale_table[h->left_mb_xy[1]];
        qp[0]  = ( mb_qp + mbn0_qp + 1 ) >> 1;
        bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
                   get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
        rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) +
                   get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1;
        qp[1]  = ( mb_qp + mbn1_qp + 1 ) >> 1;
        bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) +
                   get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1;
        rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) +
                   get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1;

        /* Filter edge */
        tprintf(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
        { int i; for (i = 0; i < 8; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
        if (MB_FIELD(h)) {
            filter_mb_mbaff_edgev ( h, img_y,                 linesize, bS  , 1, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  + 8*  linesize, linesize, bS+4, 1, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444(h)) {
                    filter_mb_mbaff_edgev ( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                } else if (CHROMA422(h)) {
                    filter_mb_mbaff_edgecv(h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1);
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                }
            }
        }else{
            filter_mb_mbaff_edgev ( h, img_y,             2*  linesize, bS  , 2, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  + linesize, 2*  linesize, bS+1, 2, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444(h)) {
                    filter_mb_mbaff_edgev ( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }
            }
        }
    }

#if CONFIG_SMALL
    for( dir = 0; dir < 2; dir++ )
        filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, a, b, chroma, dir);
#else
    filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, a, b, chroma, 0);
    filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, a, b, chroma, 1);
#endif
}