h264_mvpred.h
/*
 * H.26L/H.264/AVC/JVT/14496-10/... motion vector prediction
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 motion vector prediction.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#ifndef AVCODEC_H264_MVPRED_H
#define AVCODEC_H264_MVPRED_H

#include "internal.h"
#include "avcodec.h"
#include "h264.h"

//#undef NDEBUG
#include <assert.h>

static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, int list, int part_width){
    const int topright_ref= h->ref_cache[list][ i - 8 + part_width ];
    MpegEncContext *s = &h->s;

    /* there is no consistent mapping of mvs to neighboring locations that will
     * make mbaff happy, so we can't move all this logic to fill_caches */
    if(FRAME_MBAFF){

#define SET_DIAG_MV(MV_OP, REF_OP, XY, Y4)\
        const int xy = XY, y4 = Y4;\
        const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\
        if(!USES_LIST(mb_type,list))\
            return LIST_NOT_USED;\
        mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\
        h->mv_cache[list][scan8[0]-2][0] = mv[0];\
        h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
        return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP;

        if(topright_ref == PART_NOT_AVAILABLE
           && i >= scan8[0]+8 && (i&7)==4
           && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
            const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
            const int16_t *mv;
            AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
            *C = h->mv_cache[list][scan8[0]-2];

            if(!MB_FIELD
               && IS_INTERLACED(h->left_type[0])){
                SET_DIAG_MV(*2, >>1, h->left_mb_xy[0]+s->mb_stride, (s->mb_y&1)*2+(i>>5));
            }
            if(MB_FIELD
               && !IS_INTERLACED(h->left_type[0])){
                // left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
                SET_DIAG_MV(/2, <<1, h->left_mb_xy[i>=36], ((i>>2))&3);
            }
        }
#undef SET_DIAG_MV
    }

    if(topright_ref != PART_NOT_AVAILABLE){
        *C= h->mv_cache[list][ i - 8 + part_width ];
        return topright_ref;
    }else{
        tprintf(s->avctx, "topright MV not available\n");

        *C= h->mv_cache[list][ i - 8 - 1 ];
        return h->ref_cache[list][ i - 8 - 1 ];
    }
}
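
/* Illustrative note, not part of the original file: in the 8-wide scan8
 * cache the candidate C (above-right of block n) lives at
 * scan8[n] - 8 + part_width, and the fallback D (above-left) at
 * scan8[n] - 8 - 1. A minimal caller sketch, assuming the mv/ref caches
 * for the current MB have already been filled: */
#if 0
static int fetch_diagonal_example(H264Context *h, int list, int n, int part_width)
{
    const int16_t *C;
    /* returns C's reference index when the top-right block is available,
     * otherwise the top-left block's; *C points at the matching MV */
    return fetch_diagonal_mv(h, &C, scan8[n], list, part_width);
}
#endif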

/**
 * Get the predicted MV.
 * @param n the block index
 * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_motion(H264Context * const h, int n, int part_width, int list, int ref, int * const mx, int * const my){
    const int index8= scan8[n];
    const int top_ref= h->ref_cache[list][ index8 - 8 ];
    const int left_ref= h->ref_cache[list][ index8 - 1 ];
    const int16_t * const A= h->mv_cache[list][ index8 - 1 ];
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
    const int16_t * C;
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

/* mv_cache
   B . . A T T T T
   U . . L . . , .
   U . . L . . . .
   U . . L . . , .
   . . . L . . . .
*/

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);
    tprintf(h->s.avctx, "pred_motion match_count=%d\n", match_count);
    if(match_count > 1){ //most common
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf(h->s.avctx, "pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}
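
/* Illustrative sketch, not part of the original file: match_count counts the
 * neighbours whose reference index equals ref. When no single neighbour
 * matches, the component-wise median of A, B and C is normally used
 * (H.264 8.4.1.3.1); mid_pred() from libavutil/mathops.h returns the median
 * of three ints, e.g. mid_pred(10, -3, 4) == 4 regardless of argument order. */
#if 0
static void pred_motion_example(H264Context *h, int *mx, int *my)
{
    /* predict the MV of a whole 16x16 partition (block 0, width 4 in
     * 4x4 units) for list 0, reference index 0 */
    pred_motion(h, 0, 4, 0, 0, mx, my);
}
#endif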

/**
 * Get the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int top_ref= h->ref_cache[list][ scan8[0] - 8 ];
        const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

        tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);

        if(top_ref == ref){
            *mx= B[0];
            *my= B[1];
            return;
        }
    }else{
        const int left_ref= h->ref_cache[list][ scan8[8] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];

        tprintf(h->s.avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 4, list, ref, mx, my);
}

/**
 * Get the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static av_always_inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int left_ref= h->ref_cache[list][ scan8[0] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];

        tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }else{
        const int16_t * C;
        int diagonal_ref;

        diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf(h->s.avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);

        if(diagonal_ref == ref){
            *mx= C[0];
            *my= C[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 2, list, ref, mx, my);
}
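
/* Illustrative note, not part of the original file: pred_16x8_motion and
 * pred_8x16_motion implement the directional shortcut of H.264 8.4.1.3.1:
 * the top 16x8 partition prefers B (above), the bottom one prefers A (left);
 * the left 8x16 partition prefers A, the right one the diagonal C. Only when
 * the preferred neighbour uses a different reference index do they fall back
 * to the generic median predictor pred_motion(). */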

#define FIX_MV_MBAFF(type, refn, mvn, idx)\
    if(FRAME_MBAFF){\
        if(MB_FIELD){\
            if(!IS_INTERLACED(type)){\
                refn <<= 1;\
                AV_COPY32(mvbuf[idx], mvn);\
                mvbuf[idx][1] /= 2;\
                mvn = mvbuf[idx];\
            }\
        }else{\
            if(IS_INTERLACED(type)){\
                refn >>= 1;\
                AV_COPY32(mvbuf[idx], mvn);\
                mvbuf[idx][1] <<= 1;\
                mvn = mvbuf[idx];\
            }\
        }\
    }
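
/* Worked example (assumption, not from the original file): with MB_FIELD set
 * and a non-interlaced (frame) neighbour, one frame reference spans two field
 * references, so the neighbour's ref index is doubled and its vertical MV
 * halved: ref 1 with mv (6, 10) is seen as ref 2 with mv (6, 5). The opposite
 * conversion shifts both the other way. mvbuf is caller-provided scratch the
 * scaled MV is written into, so the neighbour's motion_val stays untouched. */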

static av_always_inline void pred_pskip_motion(H264Context * const h){
    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
    MpegEncContext * const s = &h->s;
    int8_t *ref = s->current_picture.f.ref_index[0];
    int16_t (*mv)[2] = s->current_picture.f.motion_val[0];
    int top_ref, left_ref, diagonal_ref, match_count, mx, my;
    const int16_t *A, *B, *C;
    int b_stride = h->b_stride;

    fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);

    /* To avoid doing an entire fill_decode_caches, we inline the relevant parts here.
     * FIXME: this is a partial duplicate of the logic in fill_decode_caches, but it's
     * faster this way.  Is there a way to avoid this duplication?
     */
    if(USES_LIST(h->left_type[LTOP], 0)){
        left_ref = ref[4*h->left_mb_xy[LTOP] + 1 + (h->left_block[0]&~1)];
        A = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride*h->left_block[0]];
        FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0);
        if(!(left_ref | AV_RN32A(A))){
            goto zeromv;
        }
    }else if(h->left_type[LTOP]){
        left_ref = LIST_NOT_USED;
        A = zeromv;
    }else{
        goto zeromv;
    }

    if(USES_LIST(h->top_type, 0)){
        top_ref = ref[4*h->top_mb_xy + 2];
        B = mv[h->mb2b_xy[h->top_mb_xy] + 3*b_stride];
        FIX_MV_MBAFF(h->top_type, top_ref, B, 1);
        if(!(top_ref | AV_RN32A(B))){
            goto zeromv;
        }
    }else if(h->top_type){
        top_ref = LIST_NOT_USED;
        B = zeromv;
    }else{
        goto zeromv;
    }

    tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    if(USES_LIST(h->topright_type, 0)){
        diagonal_ref = ref[4*h->topright_mb_xy + 2];
        C = mv[h->mb2b_xy[h->topright_mb_xy] + 3*b_stride];
        FIX_MV_MBAFF(h->topright_type, diagonal_ref, C, 2);
    }else if(h->topright_type){
        diagonal_ref = LIST_NOT_USED;
        C = zeromv;
    }else{
        if(USES_LIST(h->topleft_type, 0)){
            diagonal_ref = ref[4*h->topleft_mb_xy + 1 + (h->topleft_partition & 2)];
            C = mv[h->mb2b_xy[h->topleft_mb_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride)];
            FIX_MV_MBAFF(h->topleft_type, diagonal_ref, C, 2);
        }else if(h->topleft_type){
            diagonal_ref = LIST_NOT_USED;
            C = zeromv;
        }else{
            diagonal_ref = PART_NOT_AVAILABLE;
            C = zeromv;
        }
    }

    match_count= !diagonal_ref + !top_ref + !left_ref;
    tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count);
    if(match_count > 1){
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        if(!left_ref){
            mx = A[0];
            my = A[1];
        }else if(!top_ref){
            mx = B[0];
            my = B[1];
        }else{
            mx = C[0];
            my = C[1];
        }
    }else{
        mx = mid_pred(A[0], B[0], C[0]);
        my = mid_pred(A[1], B[1], C[1]);
    }

    fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
    return;
zeromv:
    fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
    return;
}
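
/* Illustrative note, not part of the original file: this inlines the P_SKIP
 * rule of H.264 8.4.1.1. The predicted MV collapses to (0,0) when the left or
 * top neighbour is unavailable, or when either one uses reference index 0
 * with a zero MV (the "goto zeromv" paths); otherwise the usual median of A,
 * B and C is taken. match_count here counts neighbours with ref == 0, hence
 * the !left_ref / !top_ref tests. */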

static void fill_decode_neighbors(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int mb_xy= h->mb_xy;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    static const uint8_t left_block_options[4][32]={
        {0,1,2,3,7,10,8,11,3+0*4, 3+1*4, 3+2*4, 3+3*4, 1+4*4, 1+8*4, 1+5*4, 1+9*4},
        {2,2,3,3,8,11,8,11,3+2*4, 3+2*4, 3+3*4, 3+3*4, 1+5*4, 1+9*4, 1+5*4, 1+9*4},
        {0,0,1,1,7,10,7,10,3+0*4, 3+0*4, 3+1*4, 3+1*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4},
        {0,2,0,2,7,10,7,10,3+0*4, 3+2*4, 3+0*4, 3+2*4, 1+4*4, 1+8*4, 1+4*4, 1+8*4}
    };

    h->topleft_partition= -1;

    top_xy = mb_xy - (s->mb_stride << MB_FIELD);

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    topleft_xy = top_xy - 1;
    topright_xy= top_xy + 1;
    left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
    h->left_block = left_block_options[0];
    if(FRAME_MBAFF){
        const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if(s->mb_y&1){
            if (left_mb_field_flag != curr_mb_field_flag) {
                left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += s->mb_stride;
                    h->left_block = left_block_options[3];
                } else {
                    topleft_xy += s->mb_stride;
                    // take top left mv from the middle of the mb, as opposed to all other modes which use the bottom right partition
                    h->topleft_partition = 0;
                    h->left_block = left_block_options[1];
                }
            }
        }else{
            if(curr_mb_field_flag){
                topleft_xy  += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
                topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
                top_xy      += s->mb_stride & (((s->current_picture.f.mb_type[top_xy    ] >> 7) & 1) - 1);
            }
            if (left_mb_field_flag != curr_mb_field_flag) {
                if (curr_mb_field_flag) {
                    left_xy[LBOT] += s->mb_stride;
                    h->left_block = left_block_options[3];
                } else {
                    h->left_block = left_block_options[2];
                }
            }
        }
    }

    h->topleft_mb_xy = topleft_xy;
    h->top_mb_xy     = top_xy;
    h->topright_mb_xy= topright_xy;
    h->left_mb_xy[LTOP] = left_xy[LTOP];
    h->left_mb_xy[LBOT] = left_xy[LBOT];
    //FIXME do we need all in the context?

    h->topleft_type    = s->current_picture.f.mb_type[topleft_xy];
    h->top_type        = s->current_picture.f.mb_type[top_xy];
    h->topright_type   = s->current_picture.f.mb_type[topright_xy];
    h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
    h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];

    if(FMO){
        if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
        if(h->slice_table[top_xy     ] != h->slice_num) h->top_type     = 0;
        if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
    }else{
        if(h->slice_table[topleft_xy ] != h->slice_num){
            h->topleft_type = 0;
            if(h->slice_table[top_xy     ] != h->slice_num) h->top_type = 0;
            if(h->slice_table[left_xy[LTOP] ] != h->slice_num) h->left_type[LTOP] = h->left_type[LBOT] = 0;
        }
    }
    if(h->slice_table[topright_xy] != h->slice_num) h->topright_type= 0;
}
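
/* Illustrative note, not part of the original file: fill_decode_neighbors
 * resolves the mb_xy indices and mb_types of the five neighbours used for
 * prediction (topleft, top, topright, and a left pair for MBAFF), picking a
 * left_block_options row to remap left-edge 4x4 indices when the current and
 * left MBs differ in field/frame coding, and zeroing the type of any
 * neighbour that belongs to another slice. */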

static void fill_decode_caches(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    int topleft_xy, top_xy, topright_xy, left_xy[LEFT_MBS];
    int topleft_type, top_type, topright_type, left_type[LEFT_MBS];
    const uint8_t * left_block= h->left_block;
    int i;
    uint8_t *nnz;
    uint8_t *nnz_cache;

    topleft_xy      = h->topleft_mb_xy;
    top_xy          = h->top_mb_xy;
    topright_xy     = h->topright_mb_xy;
    left_xy[LTOP]   = h->left_mb_xy[LTOP];
    left_xy[LBOT]   = h->left_mb_xy[LBOT];
    topleft_type    = h->topleft_type;
    top_type        = h->top_type;
    topright_type   = h->topright_type;
    left_type[LTOP] = h->left_type[LTOP];
    left_type[LBOT] = h->left_type[LBOT];

    if(!IS_SKIP(mb_type)){
        if(IS_INTRA(mb_type)){
            int type_mask= h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;

            h->topleft_samples_available=
            h->top_samples_available=
            h->left_samples_available= 0xFFFF;
            h->topright_samples_available= 0xEEEA;

            if(!(top_type & type_mask)){
                h->topleft_samples_available= 0xB3FF;
                h->top_samples_available= 0x33FF;
                h->topright_samples_available= 0x26EA;
            }
            if(IS_INTERLACED(mb_type) != IS_INTERLACED(left_type[LTOP])){
                if(IS_INTERLACED(mb_type)){
                    if(!(left_type[LTOP] & type_mask)){
                        h->topleft_samples_available&= 0xDFFF;
                        h->left_samples_available&= 0x5FFF;
                    }
                    if(!(left_type[LBOT] & type_mask)){
                        h->topleft_samples_available&= 0xFF5F;
                        h->left_samples_available&= 0xFF5F;
                    }
                }else{
                    int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];

                    assert(left_xy[LTOP] == left_xy[LBOT]);
                    if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
                        h->topleft_samples_available&= 0xDF5F;
                        h->left_samples_available&= 0x5F5F;
                    }
                }
            }else{
                if(!(left_type[LTOP] & type_mask)){
                    h->topleft_samples_available&= 0xDF5F;
                    h->left_samples_available&= 0x5F5F;
                }
            }

            if(!(topleft_type & type_mask))
                h->topleft_samples_available&= 0x7FFF;

            if(!(topright_type & type_mask))
                h->topright_samples_available&= 0xFBFF;

            if(IS_INTRA4x4(mb_type)){
                if(IS_INTRA4x4(top_type)){
                    AV_COPY32(h->intra4x4_pred_mode_cache+4+8*0, h->intra4x4_pred_mode + h->mb2br_xy[top_xy]);
                }else{
                    h->intra4x4_pred_mode_cache[4+8*0]=
                    h->intra4x4_pred_mode_cache[5+8*0]=
                    h->intra4x4_pred_mode_cache[6+8*0]=
                    h->intra4x4_pred_mode_cache[7+8*0]= 2 - 3*!(top_type & type_mask);
                }
                for(i=0; i<2; i++){
                    if(IS_INTRA4x4(left_type[LEFT(i)])){
                        int8_t *mode= h->intra4x4_pred_mode + h->mb2br_xy[left_xy[LEFT(i)]];
                        h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= mode[6-left_block[0+2*i]];
                        h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= mode[6-left_block[1+2*i]];
                    }else{
                        h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]=
                        h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= 2 - 3*!(left_type[LEFT(i)] & type_mask);
                    }
                }
            }
        }

/*
0 . T T. T T T T
1 L . .L . . . .
2 L . .L . . . .
3 . T TL . . . .
4 L . .L . . . .
5 L . .. . . . .
*/
//FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
        nnz_cache = h->non_zero_count_cache;
        if(top_type){
            nnz = h->non_zero_count[top_xy];
            AV_COPY32(&nnz_cache[4+8* 0], &nnz[4*3]);
            if(!s->chroma_y_shift){
                AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 7]);
                AV_COPY32(&nnz_cache[4+8*10], &nnz[4*11]);
            }else{
                AV_COPY32(&nnz_cache[4+8* 5], &nnz[4* 5]);
                AV_COPY32(&nnz_cache[4+8*10], &nnz[4* 9]);
            }
        }else{
            uint32_t top_empty = CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
            AV_WN32A(&nnz_cache[4+8* 0], top_empty);
            AV_WN32A(&nnz_cache[4+8* 5], top_empty);
            AV_WN32A(&nnz_cache[4+8*10], top_empty);
        }

        for (i=0; i<2; i++) {
            if(left_type[LEFT(i)]){
                nnz = h->non_zero_count[left_xy[LEFT(i)]];
                nnz_cache[3+8* 1 + 2*8*i]= nnz[left_block[8+0+2*i]];
                nnz_cache[3+8* 2 + 2*8*i]= nnz[left_block[8+1+2*i]];
                if(CHROMA444){
                    nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]+4*4];
                    nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]+4*4];
                    nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]+8*4];
                    nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]+8*4];
                }else if(CHROMA422) {
                    nnz_cache[3+8* 6 + 2*8*i]= nnz[left_block[8+0+2*i]-2+4*4];
                    nnz_cache[3+8* 7 + 2*8*i]= nnz[left_block[8+1+2*i]-2+4*4];
                    nnz_cache[3+8*11 + 2*8*i]= nnz[left_block[8+0+2*i]-2+8*4];
                    nnz_cache[3+8*12 + 2*8*i]= nnz[left_block[8+1+2*i]-2+8*4];
                }else{
                    nnz_cache[3+8* 6 + 8*i]= nnz[left_block[8+4+2*i]];
                    nnz_cache[3+8*11 + 8*i]= nnz[left_block[8+5+2*i]];
                }
            }else{
                nnz_cache[3+8* 1 + 2*8*i]=
                nnz_cache[3+8* 2 + 2*8*i]=
                nnz_cache[3+8* 6 + 2*8*i]=
                nnz_cache[3+8* 7 + 2*8*i]=
                nnz_cache[3+8*11 + 2*8*i]=
                nnz_cache[3+8*12 + 2*8*i]= CABAC && !IS_INTRA(mb_type) ? 0 : 64;
            }
        }

        if( CABAC ) {
            // top_cbp
            if(top_type) {
                h->top_cbp = h->cbp_table[top_xy];
            } else {
                h->top_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
            // left_cbp
            if (left_type[LTOP]) {
                h->left_cbp =   (h->cbp_table[left_xy[LTOP]] & 0x7F0)
                              | ((h->cbp_table[left_xy[LTOP]]>>(left_block[0]&(~1)))&2)
                              | (((h->cbp_table[left_xy[LBOT]]>>(left_block[2]&(~1)))&2) << 2);
            } else {
                h->left_cbp = IS_INTRA(mb_type) ? 0x7CF : 0x00F;
            }
        }
    }

    if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){
        int list;
        int b_stride = h->b_stride;
        for(list=0; list<h->list_count; list++){
            int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
            int8_t *ref = s->current_picture.f.ref_index[list];
            int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
            int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
            if(!USES_LIST(mb_type, list)){
                continue;
            }
            assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));

            if(USES_LIST(top_type, list)){
                const int b_xy= h->mb2b_xy[top_xy] + 3*b_stride;
                AV_COPY128(mv_cache[0 - 1*8], mv[b_xy + 0]);
                ref_cache[0 - 1*8]=
                ref_cache[1 - 1*8]= ref[4*top_xy + 2];
                ref_cache[2 - 1*8]=
                ref_cache[3 - 1*8]= ref[4*top_xy + 3];
            }else{
                AV_ZERO128(mv_cache[0 - 1*8]);
                AV_WN32A(&ref_cache[0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101u);
            }

            if(mb_type & (MB_TYPE_16x8|MB_TYPE_8x8)){
                for(i=0; i<2; i++){
                    int cache_idx = -1 + i*2*8;
                    if(USES_LIST(left_type[LEFT(i)], list)){
                        const int b_xy= h->mb2b_xy[left_xy[LEFT(i)]] + 3;
                        const int b8_xy= 4*left_xy[LEFT(i)] + 1;
                        AV_COPY32(mv_cache[cache_idx  ], mv[b_xy + b_stride*left_block[0+i*2]]);
                        AV_COPY32(mv_cache[cache_idx+8], mv[b_xy + b_stride*left_block[1+i*2]]);
                        ref_cache[cache_idx  ]= ref[b8_xy + (left_block[0+i*2]&~1)];
                        ref_cache[cache_idx+8]= ref[b8_xy + (left_block[1+i*2]&~1)];
                    }else{
                        AV_ZERO32(mv_cache[cache_idx  ]);
                        AV_ZERO32(mv_cache[cache_idx+8]);
                        ref_cache[cache_idx  ]=
                        ref_cache[cache_idx+8]= (left_type[LEFT(i)]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                    }
                }
            }else{
                if(USES_LIST(left_type[LTOP], list)){
                    const int b_xy= h->mb2b_xy[left_xy[LTOP]] + 3;
                    const int b8_xy= 4*left_xy[LTOP] + 1;
                    AV_COPY32(mv_cache[-1], mv[b_xy + b_stride*left_block[0]]);
                    ref_cache[-1]= ref[b8_xy + (left_block[0]&~1)];
                }else{
                    AV_ZERO32(mv_cache[-1]);
                    ref_cache[-1]= left_type[LTOP] ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                }
            }

            if(USES_LIST(topright_type, list)){
                const int b_xy= h->mb2b_xy[topright_xy] + 3*b_stride;
                AV_COPY32(mv_cache[4 - 1*8], mv[b_xy]);
                ref_cache[4 - 1*8]= ref[4*topright_xy + 2];
            }else{
                AV_ZERO32(mv_cache[4 - 1*8]);
                ref_cache[4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
            }
            if(ref_cache[4 - 1*8] < 0){
                if(USES_LIST(topleft_type, list)){
                    const int b_xy = h->mb2b_xy[topleft_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride);
                    const int b8_xy= 4*topleft_xy + 1 + (h->topleft_partition & 2);
                    AV_COPY32(mv_cache[-1 - 1*8], mv[b_xy]);
                    ref_cache[-1 - 1*8]= ref[b8_xy];
                }else{
                    AV_ZERO32(mv_cache[-1 - 1*8]);
                    ref_cache[-1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                }
            }

            if((mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2)) && !FRAME_MBAFF)
                continue;

            if(!(mb_type&(MB_TYPE_SKIP|MB_TYPE_DIRECT2))){
                uint8_t (*mvd_cache)[2] = &h->mvd_cache[list][scan8[0]];
                uint8_t (*mvd)[2] = h->mvd_table[list];
                ref_cache[2+8*0] =
                ref_cache[2+8*2] = PART_NOT_AVAILABLE;
                AV_ZERO32(mv_cache[2+8*0]);
                AV_ZERO32(mv_cache[2+8*2]);

                if( CABAC ) {
                    if(USES_LIST(top_type, list)){
                        const int b_xy= h->mb2br_xy[top_xy];
                        AV_COPY64(mvd_cache[0 - 1*8], mvd[b_xy + 0]);
                    }else{
                        AV_ZERO64(mvd_cache[0 - 1*8]);
                    }
                    if(USES_LIST(left_type[LTOP], list)){
                        const int b_xy= h->mb2br_xy[left_xy[LTOP]] + 6;
                        AV_COPY16(mvd_cache[-1 + 0*8], mvd[b_xy - left_block[0]]);
                        AV_COPY16(mvd_cache[-1 + 1*8], mvd[b_xy - left_block[1]]);
                    }else{
                        AV_ZERO16(mvd_cache[-1 + 0*8]);
                        AV_ZERO16(mvd_cache[-1 + 1*8]);
                    }
                    if(USES_LIST(left_type[LBOT], list)){
                        const int b_xy= h->mb2br_xy[left_xy[LBOT]] + 6;
                        AV_COPY16(mvd_cache[-1 + 2*8], mvd[b_xy - left_block[2]]);
                        AV_COPY16(mvd_cache[-1 + 3*8], mvd[b_xy - left_block[3]]);
                    }else{
                        AV_ZERO16(mvd_cache[-1 + 2*8]);
                        AV_ZERO16(mvd_cache[-1 + 3*8]);
                    }
                    AV_ZERO16(mvd_cache[2+8*0]);
                    AV_ZERO16(mvd_cache[2+8*2]);
                    if(h->slice_type_nos == AV_PICTURE_TYPE_B){
                        uint8_t *direct_cache = &h->direct_cache[scan8[0]];
                        uint8_t *direct_table = h->direct_table;
                        fill_rectangle(direct_cache, 4, 4, 8, MB_TYPE_16x16>>1, 1);

                        if(IS_DIRECT(top_type)){
                            AV_WN32A(&direct_cache[-1*8], 0x01010101u*(MB_TYPE_DIRECT2>>1));
                        }else if(IS_8X8(top_type)){
                            int b8_xy = 4*top_xy;
                            direct_cache[0 - 1*8]= direct_table[b8_xy + 2];
                            direct_cache[2 - 1*8]= direct_table[b8_xy + 3];
                        }else{
                            AV_WN32A(&direct_cache[-1*8], 0x01010101*(MB_TYPE_16x16>>1));
                        }

                        if(IS_DIRECT(left_type[LTOP]))
                            direct_cache[-1 + 0*8]= MB_TYPE_DIRECT2>>1;
                        else if(IS_8X8(left_type[LTOP]))
                            direct_cache[-1 + 0*8]= direct_table[4*left_xy[LTOP] + 1 + (left_block[0]&~1)];
                        else
                            direct_cache[-1 + 0*8]= MB_TYPE_16x16>>1;

                        if(IS_DIRECT(left_type[LBOT]))
                            direct_cache[-1 + 2*8]= MB_TYPE_DIRECT2>>1;
                        else if(IS_8X8(left_type[LBOT]))
                            direct_cache[-1 + 2*8]= direct_table[4*left_xy[LBOT] + 1 + (left_block[2]&~1)];
                        else
                            direct_cache[-1 + 2*8]= MB_TYPE_16x16>>1;
                    }
                }
            }
            if(FRAME_MBAFF){
#define MAP_MVS\
                MAP_F2F(scan8[0] - 1 - 1*8, topleft_type)\
                MAP_F2F(scan8[0] + 0 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 1 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 2 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 3 - 1*8, top_type)\
                MAP_F2F(scan8[0] + 4 - 1*8, topright_type)\
                MAP_F2F(scan8[0] - 1 + 0*8, left_type[LTOP])\
                MAP_F2F(scan8[0] - 1 + 1*8, left_type[LTOP])\
                MAP_F2F(scan8[0] - 1 + 2*8, left_type[LBOT])\
                MAP_F2F(scan8[0] - 1 + 3*8, left_type[LBOT])
                if(MB_FIELD){
#define MAP_F2F(idx, mb_type)\
                    if(!IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
                        h->ref_cache[list][idx] <<= 1;\
                        h->mv_cache[list][idx][1] /= 2;\
                        h->mvd_cache[list][idx][1] >>=1;\
                    }
                    MAP_MVS
#undef MAP_F2F
                }else{
#define MAP_F2F(idx, mb_type)\
                    if(IS_INTERLACED(mb_type) && h->ref_cache[list][idx] >= 0){\
                        h->ref_cache[list][idx] >>= 1;\
                        h->mv_cache[list][idx][1] <<= 1;\
                        h->mvd_cache[list][idx][1] <<= 1;\
                    }
                    MAP_MVS
#undef MAP_F2F
                }
            }
        }
    }

    h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[LTOP]);
}
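
/* Illustrative note, not part of the original file: fill_decode_caches pulls
 * everything the per-MB decode loop reads from the neighbours into small
 * 8-wide caches: intra 4x4 prediction modes, non-zero-count values, CABAC
 * cbp bits, motion vectors and reference indices (plus mvd and direct flags
 * for B slices), finally rescaling cached MVs between field and frame units
 * via MAP_F2F when MBAFF is in use. */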

/**
 * decodes a P_SKIP or B_SKIP macroblock
 */
static void av_unused decode_mb_skip(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int mb_xy= h->mb_xy;
    int mb_type=0;

    memset(h->non_zero_count[mb_xy], 0, 48);

    if(MB_FIELD)
        mb_type|= MB_TYPE_INTERLACED;

    if( h->slice_type_nos == AV_PICTURE_TYPE_B )
    {
        // just for fill_caches. pred_direct_motion will set the real mb_type
        mb_type|= MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
        if(h->direct_spatial_mv_pred){
            fill_decode_neighbors(h, mb_type);
            fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
        }
        ff_h264_pred_direct_motion(h, &mb_type);
        mb_type|= MB_TYPE_SKIP;
    }
    else
    {
        mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;

        fill_decode_neighbors(h, mb_type);
        pred_pskip_motion(h);
    }

    write_back_motion(h, mb_type);
    s->current_picture.f.mb_type[mb_xy]      = mb_type;
    s->current_picture.f.qscale_table[mb_xy] = s->qscale;
    h->slice_table[ mb_xy ]= h->slice_num;
    h->prev_mb_skipped= 1;
}
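
/* Illustrative note, not part of the original file: decode_mb_skip is called
 * when the bitstream flags a macroblock as skipped (no residual, no explicit
 * motion): P_SKIP takes its MV from pred_pskip_motion(), B_SKIP from
 * ff_h264_pred_direct_motion(). */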

#endif /* AVCODEC_H264_MVPRED_H */