Converting YUV420 to Image<Bgr, byte>
I have a byte array of YUV420 data.
byte[] yuv420;//yuv data
How can I convert it to Image<Bgr, byte>?
I found a mathematical formula to convert to RGB and then to Image<Bgr, byte>, but it is very slow. Is there a way to convert it faster?
There is a class in Emgu for the conversion:
COLOR_CONVERSION(enum CV_YUV2RGB Convert YUV color to RGB)
but I don't understand how to use this class. Can anyone help?
static Bitmap ConvertYUV2RGB(byte[] yuvFrame, byte[] rgbFrame, int width, int height)
{
int uIndex = width * height;
int vIndex = uIndex + ((width * height) >> 2);
int gIndex = width * height;
int bIndex = gIndex * 2;
int temp = 0;
//int r, g, b hold the RGB values converted from the binary image data
Bitmap bm = new Bitmap(width, height);
int r = 0;
int g = 0;
int b = 0;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// R component
temp = (int)(yuvFrame[y * width + x] + (yuvFrame[vIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[0, 2]);
rgbFrame[y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
// G component
temp = (int)(yuvFrame[y * width + x] + (yuvFrame[uIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[1, 1] + (yuvFrame[vIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[1, 2]);
rgbFrame[gIndex + y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
// B component
temp = (int)(yuvFrame[y * width + x] + (yuvFrame[uIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[2, 1]);
rgbFrame[bIndex + y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
Color c = Color.FromArgb(rgbFrame[y * width + x], rgbFrame[gIndex + y * width + x], rgbFrame[bIndex + y * width + x]);
bm.SetPixel(x, y, c);
}
}
return bm;
}
static double[,] YUV2RGB_CONVERT_MATRIX = new double[3, 3] { { 1, 0, 1.4022 }, { 1, -0.3456, -0.7145 }, { 1, 1.771, 0 } };
static byte clamp(float input)
{
if (input < 0) input = 0;
if (input > 255) input = 255;
return (byte)Math.Abs(input);
}
You're in luck, because I solved exactly this problem a while ago. There are links in the code where you can get more information.
In general, always try to use pointers when doing image processing, and avoid calling functions inside nested loops. In my code the range check is by far the slowest part, but unfortunately it is required (try switching it off with the preprocessor switch). I have to say, though, that in the end I never used this function because it was simply too slow; instead I chose to implement it in C++ and call it from C# using P/Invoke.
private static unsafe void YUV2RGBManaged(byte[] YUVData, byte[] RGBData, int width, int height)
{
//returned pixel format is 2yuv - i.e. luminance, y, is represented for every pixel and the u and v are alternated
//like this (where Cb = u , Cr = v)
//Y0 Cb Y1 Cr Y2 Cb Y3
/*http://msdn.microsoft.com/en-us/library/ms893078.aspx
*
* C = Y - 16
D = U - 128
E = V - 128
R = clip(( 298 * C + 409 * E + 128) >> 8)
G = clip(( 298 * C - 100 * D - 208 * E + 128) >> 8)
B = clip(( 298 * C + 516 * D + 128) >> 8)
* here are a whole bunch more formats for doing this...
* http://stackoverflow.com/questions/3943779/converting-to-yuv-ycbcr-colour-space-many-versions
*/
fixed(byte* pRGBs = RGBData, pYUVs = YUVData)
{
for (int r = 0; r < height; r++)
{
byte* pRGB = pRGBs + r * width * 3;
byte* pYUV = pYUVs + r * width * 2;
//process two pixels at a time
for (int c = 0; c < width; c += 2)
{
int C1 = pYUV[1] - 16;
int C2 = pYUV[3] - 16;
int D = pYUV[2] - 128;
int E = pYUV[0] - 128;
int R1 = (298 * C1 + 409 * E + 128) >> 8;
int G1 = (298 * C1 - 100 * D - 208 * E + 128) >> 8;
int B1 = (298 * C1 + 516 * D + 128) >> 8;
int R2 = (298 * C2 + 409 * E + 128) >> 8;
int G2 = (298 * C2 - 100 * D - 208 * E + 128) >> 8;
int B2 = (298 * C2 + 516 * D + 128) >> 8;
#if true
//check for overflow
//unsurprisingly this takes the bulk of the time.
pRGB[0] = (byte)(R1 < 0 ? 0 : R1 > 255 ? 255 : R1);
pRGB[1] = (byte)(G1 < 0 ? 0 : G1 > 255 ? 255 : G1);
pRGB[2] = (byte)(B1 < 0 ? 0 : B1 > 255 ? 255 : B1);
pRGB[3] = (byte)(R2 < 0 ? 0 : R2 > 255 ? 255 : R2);
pRGB[4] = (byte)(G2 < 0 ? 0 : G2 > 255 ? 255 : G2);
pRGB[5] = (byte)(B2 < 0 ? 0 : B2 > 255 ? 255 : B2);
#else
pRGB[0] = (byte)(R1);
pRGB[1] = (byte)(G1);
pRGB[2] = (byte)(B1);
pRGB[3] = (byte)(R2);
pRGB[4] = (byte)(G2);
pRGB[5] = (byte)(B2);
#endif
pRGB += 6;
pYUV += 4;
}
}
}
}
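A hypothetical call sketch for the managed routine above (the variable names are mine). Note, as the comment in the code says, that it expects packed 4:2:2 data, two bytes per pixel with interleaved U/V, rather than the planar YUV420 layout from the question:
byte[] rgb = new byte[width * height * 3];     // 3 output bytes per pixel
YUV2RGBManaged(packedYuv, rgb, width, height); // packedYuv holds width * height * 2 bytes
// The routine writes bytes in R,G,B order; swap the R and B channels (or run a
// colour conversion) if you need the B,G,R order that Format24bppRgb and
// Image<Bgr, byte> expect.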
And in case you decide to implement it in C++:
void YUV2RGB(void *yuvDataIn,void *rgbDataOut, int w, int h, int outNCh)
{
const int ch2 = 2 * outNCh;
unsigned char* pRGBs = (unsigned char*)rgbDataOut;
unsigned char* pYUVs = (unsigned char*)yuvDataIn;
for (int r = 0; r < h; r++)
{
unsigned char* pRGB = pRGBs + r * w * outNCh;
unsigned char* pYUV = pYUVs + r * w * 2;
//process two pixels at a time
for (int c = 0; c < w; c += 2)
{
int C1 = pYUV[1] - 16;
int C2 = pYUV[3] - 16;
int D = pYUV[2] - 128;
int E = pYUV[0] - 128;
int R1 = (298 * C1 + 409 * E + 128) >> 8;
int G1 = (298 * C1 - 100 * D - 208 * E + 128) >> 8;
int B1 = (298 * C1 + 516 * D + 128) >> 8;
int R2 = (298 * C2 + 409 * E + 128) >> 8;
int G2 = (298 * C2 - 100 * D - 208 * E + 128) >> 8;
int B2 = (298 * C2 + 516 * D + 128) >> 8;
//unsurprisingly this takes the bulk of the time.
pRGB[0] = (unsigned char)(R1 < 0 ? 0 : R1 > 255 ? 255 : R1);
pRGB[1] = (unsigned char)(G1 < 0 ? 0 : G1 > 255 ? 255 : G1);
pRGB[2] = (unsigned char)(B1 < 0 ? 0 : B1 > 255 ? 255 : B1);
pRGB[3] = (unsigned char)(R2 < 0 ? 0 : R2 > 255 ? 255 : R2);
pRGB[4] = (unsigned char)(G2 < 0 ? 0 : G2 > 255 ? 255 : G2);
pRGB[5] = (unsigned char)(B2 < 0 ? 0 : B2 > 255 ? 255 : B2);
pRGB += ch2;
pYUV += 4;
}
}
}
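A minimal sketch of calling the C++ routine above from C# via P/Invoke, as the answer suggests. The DLL name ("yuvconvert.dll") is made up for illustration, and the sketch assumes the function is exported with C linkage and the default Cdecl calling convention:
using System.Runtime.InteropServices;

static class NativeYuv
{
    // both the DLL name and the export details are assumptions for illustration
    [DllImport("yuvconvert.dll", CallingConvention = CallingConvention.Cdecl, EntryPoint = "YUV2RGB")]
    public static extern void YUV2RGB(byte[] yuvDataIn, byte[] rgbDataOut, int w, int h, int outNCh);
}

// usage: byte[] rgb = new byte[w * h * 3]; NativeYuv.YUV2RGB(yuv, rgb, w, h, 3);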
The biggest mistake in your code is the use of Bitmap.SetPixel; calling it on every iteration of the inner loop is extremely slow. Instead, store your RGB values in a byte array and, once it is filled, copy it into the bitmap in a single step (the LockBits/Marshal.Copy snippet further down shows how).
Secondly, understand that y, u and v are bytes, so each can only take 256 possible values. It is therefore perfectly feasible to build lookup tables for r, g and b, so that you do not have to perform any calculation in the inner loop.
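A minimal sketch of that lookup-table idea (my own illustration, not code from this answer). The tables precompute every per-byte term of the fixed-point formulas quoted above, so the per-pixel work reduces to lookups, additions, a shift and the clamp:
static readonly int[] C298 = new int[256]; // 298 * (Y - 16) + 128
static readonly int[] E409 = new int[256]; // 409 * (V - 128)
static readonly int[] D100 = new int[256]; // 100 * (U - 128)
static readonly int[] E208 = new int[256]; // 208 * (V - 128)
static readonly int[] D516 = new int[256]; // 516 * (U - 128)

static void InitYuvTables()
{
    for (int i = 0; i < 256; i++)
    {
        C298[i] = 298 * (i - 16) + 128;
        E409[i] = 409 * (i - 128);
        D100[i] = 100 * (i - 128);
        E208[i] = 208 * (i - 128);
        D516[i] = 516 * (i - 128);
    }
}

// per pixel (y, u, v are the source bytes):
// int r = (C298[y] + E409[v]) >> 8;
// int g = (C298[y] - D100[u] - E208[v]) >> 8;
// int b = (C298[y] + D516[u]) >> 8;
// ...followed by the same 0..255 clamp as before.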
Finally, if you really want performance, you will have to write this in C++ with pointer arithmetic and all optimisations enabled. The loop is also a very good candidate for a parallel for, since every iteration operates on independent data. You could optimise it further still with SSE intrinsics, converting several pixels per instruction.
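A rough sketch of the parallel-for suggestion (my illustration, using System.Threading.Tasks; ConvertRow is a hypothetical helper that performs one row of the inner-loop work from YUV2RGBManaged above, so each iteration touches independent data):
Parallel.For(0, height, r =>
{
    // each row reads width * 2 packed YUV bytes and writes width * 3 RGB bytes
    ConvertRow(yuvData, rgbData, width, r); // hypothetical per-row helper
});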
I just found an old piece of code that may help you. YUV conversion using OpenCvSharp (disclaimer: I stripped out some unnecessary code and have not tested this!):
IplImage yuvImage = new IplImage(w, h, BitDepth.U8, 3);
IplImage rgbImage = new IplImage(w, h, BitDepth.U8, 3);
Cv.CvtColor(yuvImage, rgbImage, ColorConversion.CrCbToBgr);
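For the Emgu API that the question actually asks about, a rough equivalent is sketched below. Treat it as an untested assumption on my part: it relies on your Emgu version exposing a ColorConversion member equivalent to OpenCV's COLOR_YUV2BGR_I420 (the exact enum name varies between versions) and assumes the row stride equals the width (i.e. width is a multiple of 4):
// wrap the planar YUV420 buffer as a single-channel image with height * 3 / 2 rows
Image<Gray, byte> yuv = new Image<Gray, byte>(width, height * 3 / 2);
yuv.Bytes = yuv420; // Y plane followed by the U and V planes
Image<Bgr, byte> bgr = new Image<Bgr, byte>(width, height);
CvInvoke.CvtColor(yuv, bgr, ColorConversion.Yuv2BgrI420); // enum member name is an assumption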
And to answer your other question - converting a byte[] into a Bitmap can be done like this:
int w = 100;
int h = 200;
int ch = 3;
byte[] imageData = new byte[w * h * ch]; //your image data here
Bitmap bitmap = new Bitmap(w, h, PixelFormat.Format24bppRgb);
BitmapData bmData = bitmap.LockBits(new System.Drawing.Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.ReadWrite, bitmap.PixelFormat);
IntPtr pNative = bmData.Scan0;
//note: this single copy assumes bmData.Stride == w * ch (true when w * ch is a multiple of 4);
//otherwise copy row by row, advancing the destination by bmData.Stride
Marshal.Copy(imageData, 0, pNative, w * h * ch);
bitmap.UnlockBits(bmData);
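If you then need Emgu's Image<Bgr, byte> rather than a Bitmap, the usual route (again my addition, not part of the original answer; the exact API depends on your Emgu version) is:
// Emgu 3.x: Image<Bgr, byte> img = new Image<Bgr, byte>(bitmap);
// Emgu 4.x (Emgu.CV.Bitmap package): Image<Bgr, byte> img = bitmap.ToImage<Bgr, byte>();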
A faster version: two fewer multiplications and two fewer additions per pixel:
private static unsafe void YUV2RGBManaged(byte[] YUVData, byte[] RGBData, int width, int height)
{
//returned pixel format is 2yuv - i.e. luminance, y, is represented for every pixel and the u and v are alternated
//like this (where Cb = u , Cr = v)
//Y0 Cb Y1 Cr Y2 Cb Y3
/*http://msdn.microsoft.com/en-us/library/ms893078.aspx
*
C = 298 * (Y - 16) + 128
D = U - 128
E = V - 128
R = clip(( C + 409 * E) >> 8)
G = clip(( C - 100 * D - 208 * E) >> 8)
B = clip(( C + 516 * D ) >> 8)
* here are a whole bunch more formats for doing this...
* http://stackoverflow.com/questions/3943779/converting-to-yuv-ycbcr-colour-space-many-versions
*/
fixed(byte* pRGBs = RGBData, pYUVs = YUVData)
{
for (int r = 0; r < height; r++)
{
byte* pRGB = pRGBs + r * width * 3;
byte* pYUV = pYUVs + r * width * 2;
//process two pixels at a time
for (int c = 0; c < width; c += 2)
{
int C1 = 298 * (pYUV[1] - 16) + 128;
int C2 = 298 * (pYUV[3] - 16) + 128;
int D = pYUV[2] - 128;
int E = pYUV[0] - 128;
int R1 = (C1 + 409 * E) >> 8;
int G1 = (C1 - 100 * D - 208 * E) >> 8;
int B1 = (C1 + 516 * D) >> 8;
int R2 = (C2 + 409 * E) >> 8;
int G2 = (C2 - 100 * D - 208 * E) >> 8;
int B2 = (C2 + 516 * D) >> 8;
//check for overflow
//unsurprisingly this takes the bulk of the time.
pRGB[0] = (byte)(R1 < 0 ? 0 : R1 > 255 ? 255 : R1);
pRGB[1] = (byte)(G1 < 0 ? 0 : G1 > 255 ? 255 : G1);
pRGB[2] = (byte)(B1 < 0 ? 0 : B1 > 255 ? 255 : B1);
pRGB[3] = (byte)(R2 < 0 ? 0 : R2 > 255 ? 255 : R2);
pRGB[4] = (byte)(G2 < 0 ? 0 : G2 > 255 ? 255 : G2);
pRGB[5] = (byte)(B2 < 0 ? 0 : B2 > 255 ? 255 : B2);
pRGB += 6;
pYUV += 4;
}
}
}
}