Ticket #4948: use_swscale_patch.diff
File use_swscale_patch.diff, 9.0 KB (added 14 years ago)
AVCodecDecoder.h
14 14 15 15 #include <MediaFormats.h> 16 16 17 extern "C" { 18 #include "swscale.h" 19 } 20 17 21 #include "DecoderPlugin.h" 18 22 #include "ReaderPlugin.h" 19 23 … … 80 84 bool fCodecInitDone; 81 85 82 86 gfx_convert_func fFormatConversionFunc; 87 SwsContext* fSwsContext; 83 88 84 89 char* fExtraData; 85 90 int fExtraDataSize; -
gfx_util.h
30 30 // will become: 31 31 typedef void (*gfx_convert_func) (AVFrame *in, AVFrame *out, int width, int height); 32 32 33 // this function will try to find the best colorspaces for both the ff-codec and 33 // this function will try to find the best colorspaces for both the ff-codec and 34 34 // the Media Kit sides. 35 35 gfx_convert_func resolve_colorspace(color_space cs, PixelFormat pixelFormat); 36 36 37 const char *pixfmt_to_string(int p);37 const char *pixfmt_to_string(int format); 38 38 39 color_space pixfmt_to_colorspace(int p); 39 color_space pixfmt_to_colorspace(int format); 40 PixelFormat colorspace_to_pixfmt(color_space format); 40 41 41 42 void dump_ffframe(AVFrame *frame, const char *name); 42 43 -
AVCodecDecoder.cpp
73 73 fCodecInitDone(false), 74 74 75 75 fFormatConversionFunc(NULL), 76 fSwsContext(NULL), 76 77 77 78 fExtraData(NULL), 78 79 fExtraDataSize(0), … … 115 116 free(fInputPicture); 116 117 free(fContext); 117 118 119 if (fSwsContext != NULL) 120 sws_freeContext(fSwsContext); 121 118 122 delete[] fExtraData; 119 123 delete[] fOutputBuffer; 120 124 } … … 453 457 // time using another pixel-format that is supported by the decoder. 454 458 // But libavcodec doesn't seem to offer any way to tell the decoder 455 459 // which format it should use. 456 fFormatConversionFunc = 0; 460 // fFormatConversionFunc = 0; 461 fSwsContext = NULL; 457 462 // Iterate over supported codec formats 458 463 for (int i = 0; i < 1; i++) { 459 464 // close any previous instance … … 465 470 if (avcodec_open(fContext, fCodec) >= 0) { 466 471 fCodecInitDone = true; 467 472 468 fFormatConversionFunc = resolve_colorspace( 469 fOutputVideoFormat.display.format, fContext->pix_fmt); 473 // fFormatConversionFunc = resolve_colorspace( 474 // fOutputVideoFormat.display.format, fContext->pix_fmt); 475 fSwsContext = sws_getContext(fContext->width, fContext->height, 476 fContext->pix_fmt, fContext->width, fContext->height, 477 colorspace_to_pixfmt(fOutputVideoFormat.display.format), 478 SWS_FAST_BILINEAR, NULL, NULL, NULL); 470 479 } 471 if (fFormatConversionFunc != NULL)472 break;480 // if (fFormatConversionFunc != NULL) 481 // break; 473 482 } 474 483 475 484 if (!fCodecInitDone) { … … 477 486 return B_ERROR; 478 487 } 479 488 480 if (fFormatConversionFunc == NULL) { 481 TRACE("no pixel format conversion function found or decoder has " 482 "not set the pixel format yet!\n"); 489 // if (fFormatConversionFunc == NULL) { 490 // TRACE("no pixel format conversion function found or decoder has " 491 // "not set the pixel format yet!\n"); 492 // } 493 if (fSwsContext == NULL) { 494 TRACE("No SWS Scale context or decoder has not set the pixel format " 495 "yet!\n"); 483 496 } 484 497 485 498 if 
(fOutputVideoFormat.display.format == B_YCbCr422) { … … 707 720 // pixfmt_to_string(fContext->pix_fmt)); 708 721 709 722 // Some decoders do not set pix_fmt until they have decoded 1 frame 710 if (fFormatConversionFunc == NULL) { 711 fFormatConversionFunc = resolve_colorspace( 712 fOutputVideoFormat.display.format, fContext->pix_fmt); 723 // if (fFormatConversionFunc == NULL) { 724 // fFormatConversionFunc = resolve_colorspace( 725 // fOutputVideoFormat.display.format, fContext->pix_fmt); 726 // } 727 if (fSwsContext == NULL) { 728 fSwsContext = sws_getContext(fContext->width, fContext->height, 729 fContext->pix_fmt, fContext->width, fContext->height, 730 colorspace_to_pixfmt(fOutputVideoFormat.display.format), 731 SWS_FAST_BILINEAR, NULL, NULL, NULL); 713 732 } 733 714 734 fOutputPicture->data[0] = (uint8_t*)outBuffer; 715 735 fOutputPicture->linesize[0] 716 736 = fOutputVideoFormat.display.bytes_per_row; 717 737 718 if (fFormatConversionFunc != NULL) { 738 // if (fFormatConversionFunc != NULL) { 739 if (fSwsContext != NULL) { 719 740 if (useDeinterlacedPicture) { 720 741 AVFrame inputFrame; 721 742 inputFrame.data[0] = deinterlacedPicture.data[0]; … … 727 748 inputFrame.linesize[2] = deinterlacedPicture.linesize[2]; 728 749 inputFrame.linesize[3] = deinterlacedPicture.linesize[3]; 729 750 730 (*fFormatConversionFunc)(&inputFrame, 731 fOutputPicture, width, height); 751 // (*fFormatConversionFunc)(&inputFrame, 752 // fOutputPicture, width, height); 753 sws_scale(fSwsContext, inputFrame.data, 754 inputFrame.linesize, 0, fContext->height, 755 fOutputPicture->data, fOutputPicture->linesize); 732 756 } else { 733 (*fFormatConversionFunc)(fInputPicture, fOutputPicture, 734 width, height); 757 // (*fFormatConversionFunc)(fInputPicture, fOutputPicture, 758 // width, height); 759 // Run the pixel format conversion 760 sws_scale(fSwsContext, fInputPicture->data, 761 fInputPicture->linesize, 0, fContext->height, 762 fOutputPicture->data, fOutputPicture->linesize); 735 763 } 
736 764 } 737 765 if (fInputPicture->interlaced_frame) -
gfx_util.cpp
16 16 #define TRACE(a...) 17 17 #endif 18 18 19 // this function will try to find the best colorspaces for both the ff-codec and 19 // this function will try to find the best colorspaces for both the ff-codec and 20 20 // the Media Kit sides. 21 21 gfx_convert_func resolve_colorspace(color_space colorSpace, PixelFormat pixelFormat) 22 22 { … … 34 34 return gfx_conv_yuv410p_rgb32_c; 35 35 // } 36 36 } 37 37 38 38 if (pixelFormat == PIX_FMT_YUV411P) { 39 39 // if (cpu.HasMMX()) { 40 40 // TRACE("resolve_colorspace: gfx_conv_yuv411p_rgb32_mmx\n"); … … 44 44 return gfx_conv_yuv411p_rgb32_c; 45 45 // } 46 46 } 47 47 48 48 if (pixelFormat == PIX_FMT_YUV420P || pixelFormat == PIX_FMT_YUVJ420P) { 49 49 if (cpu.HasSSE2()) { 50 50 TRACE("resolve_colorspace: gfx_conv_yuv420p_rgba32_sse2\n"); … … 54 54 return gfx_conv_YCbCr420p_RGB32_c; 55 55 } 56 56 } 57 57 58 58 if (pixelFormat == PIX_FMT_YUV422P || pixelFormat == PIX_FMT_YUVJ422P) { 59 59 if (cpu.HasSSE2()) { 60 60 return gfx_conv_yuv422p_rgba32_sse2; … … 95 95 return gfx_conv_yuv411p_ycbcr422_c; 96 96 // } 97 97 } 98 98 99 99 if (pixelFormat == PIX_FMT_YUV420P || pixelFormat == PIX_FMT_YUVJ420P) { 100 100 // if (cpu.HasMMX()) { 101 101 // TRACE("resolve_colorspace: gfx_conv_yuv420p_ycbcr422_mmx\n"); … … 105 105 return gfx_conv_yuv420p_ycbcr422_c; 106 106 // } 107 107 } 108 108 109 109 if (pixelFormat == PIX_FMT_YUYV422) { 110 110 // if (cpu.HasMMX()) { 111 111 // TRACE("resolve_colorspace: PIX_FMT_YUV422 => B_YCbCr422: gfx_conv_null_mmx\n"); … … 115 115 return gfx_conv_null_c; 116 116 // } 117 117 } 118 118 119 119 TRACE("resolve_colorspace: %s => B_YCbCr422: NULL\n", pixfmt_to_string(pixelFormat)); 120 120 return gfx_conv_null_c; 121 121 122 122 default: 123 123 TRACE("resolve_colorspace: default: NULL !!!\n"); 124 124 return NULL; … … 248 248 } 249 249 250 250 251 PixelFormat 252 colorspace_to_pixfmt(color_space format) 253 { 254 switch(format) { 255 default: 256 case B_NO_COLOR_SPACE: 257 return PIX_FMT_NONE; 258 259 // 
NOTE: See pixfmt_to_colorspace() for what these are. 260 case B_YUV420: 261 return PIX_FMT_YUV420P; 262 case B_YUV422: 263 return PIX_FMT_YUV422P; 264 case B_RGB24_BIG: 265 return PIX_FMT_RGB24; 266 case B_RGB24: 267 return PIX_FMT_BGR24; 268 case B_YUV444: 269 return PIX_FMT_YUV444P; 270 case B_RGBA32_BIG: 271 case B_RGB32_BIG: 272 return PIX_FMT_BGR32; 273 case B_YUV9: 274 return PIX_FMT_YUV410P; 275 case B_YUV12: 276 return PIX_FMT_YUV411P; 277 // TODO: YCbCr color spaces! These are not the same as YUV! 278 case B_RGB16_BIG: 279 return PIX_FMT_RGB565; 280 case B_RGB15_BIG: 281 return PIX_FMT_RGB555; 282 case B_GRAY8: 283 return PIX_FMT_GRAY8; 284 case B_GRAY1: 285 return PIX_FMT_MONOBLACK; 286 case B_CMAP8: 287 return PIX_FMT_PAL8; 288 case B_RGBA32: 289 case B_RGB32: 290 return PIX_FMT_RGB32; 291 case B_RGB16: 292 return PIX_FMT_BGR565; 293 case B_RGB15: 294 return PIX_FMT_BGR555; 295 } 296 } 297 298 251 299 #define BEGIN_TAG "\033[31m" 252 300 #define END_TAG "\033[0m" 253 301 254 302 void dump_ffframe(AVFrame *frame, const char *name) 255 303 { 256 304 const char *picttypes[] = {"no pict type", "intra", "predicted", "bidir pre", "s(gmc)-vop"}; 257 printf(BEGIN_TAG"AVFrame(%s) pts:%-10lld cnum:%-5d dnum:%-5d %s%s, ]\n"END_TAG, 258 name, 259 frame->pts, 260 frame->coded_picture_number, 261 frame->display_picture_number, 262 // frame->quality, 263 frame->key_frame?"keyframe, ":"", 305 printf(BEGIN_TAG"AVFrame(%s) pts:%-10lld cnum:%-5d dnum:%-5d %s%s, ]\n"END_TAG, 306 name, 307 frame->pts, 308 frame->coded_picture_number, 309 frame->display_picture_number, 310 // frame->quality, 311 frame->key_frame?"keyframe, ":"", 264 312 picttypes[frame->pict_type]); 265 313 // printf(BEGIN_TAG"\t\tlinesize[] = {%ld, %ld, %ld, %ld}\n"END_TAG, frame->linesize[0], frame->linesize[1], frame->linesize[2], frame->linesize[3]); 266 314 }