From e64259b497c78b06e8244d16931e68beab426099 Mon Sep 17 00:00:00 2001
From: homm
Date: Wed, 25 May 2016 05:25:22 +0300
Subject: [PATCH 01/11] sse4 implementation

---
 libImaging/Convert.c | 48 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 40 insertions(+), 8 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index b3e48e52b08..9e72bcc7114 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -35,6 +35,11 @@
 #include "Imaging.h"
 
+#include <mmintrin.h>
+#include <emmintrin.h>
+#include <smmintrin.h>
+
+
 #define MAX(a, b) (a)>(b) ? (a) : (b)
 #define MIN(a, b) (a)<(b) ? (a) : (b)
 
@@ -441,14 +446,41 @@ rgba2rgb(UINT8* out, const UINT8* in, int xsize)
 static void
 rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    unsigned int alpha, tmp;
-    for (x = 0; x < xsize; x++) {
-        alpha = in[3];
-        *out++ = MULDIV255(*in++, alpha, tmp);
-        *out++ = MULDIV255(*in++, alpha, tmp);
-        *out++ = MULDIV255(*in++, alpha, tmp);
-        *out++ = *in++;
+    unsigned int tmp;
+    unsigned char alpha;
+    int x = 0;
+    __m128i zero = _mm_setzero_si128();
+    __m128i half = _mm_set1_epi16(128);
+    __m128i maxalpha = _mm_set_epi16(255, 0, 0, 0, 255, 0, 0, 0);
+    __m128i source, pix1, pix2, factors;
+
+    for (; x < xsize - 3; x += 4) {
+        source = _mm_loadu_si128((__m128i *) &in[x * 4]);
+
+        pix1 = _mm_unpacklo_epi8(source, zero);
+        factors = _mm_shufflelo_epi16(pix1, _MM_SHUFFLE(3, 3, 3, 3));
+        factors = _mm_shufflehi_epi16(factors, _MM_SHUFFLE(3, 3, 3, 3));
+        factors = _mm_or_si128(factors, maxalpha);
+        pix1 = _mm_add_epi16(_mm_mullo_epi16(pix1, factors), half);
+        pix1 = _mm_add_epi16(pix1, _mm_srli_epi16(pix1, 8));
+        pix1 = _mm_srli_epi16(pix1, 8);
+
+        pix2 = _mm_unpackhi_epi8(source, zero);
+        factors = _mm_shufflelo_epi16(pix2, _MM_SHUFFLE(3, 3, 3, 3));
+        factors = _mm_shufflehi_epi16(factors, _MM_SHUFFLE(3, 3, 3, 3));
+        factors = _mm_or_si128(factors, maxalpha);
+        pix2 = _mm_add_epi16(_mm_mullo_epi16(pix2, factors), half);
+        pix2 = _mm_add_epi16(pix2, _mm_srli_epi16(pix2, 8));
+        pix2 = _mm_srli_epi16(pix2, 8);
+
+        _mm_storeu_si128((__m128i *) &out[x * 4], _mm_packus_epi16(pix1, pix2));
+    }
+    for (; x < xsize; x++) {
+        alpha = in[x * 4 + 3];
+        out[x * 4 + 0] = MULDIV255(in[x * 4 + 0], alpha, tmp);
+        out[x * 4 + 1] = MULDIV255(in[x * 4 + 1], alpha, tmp);
+        out[x * 4 + 2] = MULDIV255(in[x * 4 + 2], alpha, tmp);
+        out[x * 4 + 3] = alpha;
     }
 }
 

From 3f730ffbb689dbc020a45e9c9553a8e73f760b65 Mon Sep 17 00:00:00 2001
From: homm
Date: Wed, 25 May 2016 05:39:59 +0300
Subject: [PATCH 02/11] faster implementation

---
 libImaging/Convert.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 9e72bcc7114..0d634e6e905 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -451,24 +451,23 @@ rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
     int x = 0;
     __m128i zero = _mm_setzero_si128();
     __m128i half = _mm_set1_epi16(128);
-    __m128i maxalpha = _mm_set_epi16(255, 0, 0, 0, 255, 0, 0, 0);
-    __m128i source, pix1, pix2, factors;
+    __m128i maxalpha = _mm_set1_epi32(0xff000000);
+    __m128i factormask = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+    __m128i factorsource, source, pix1, pix2, factors;
 
     for (; x < xsize - 3; x += 4) {
         source = _mm_loadu_si128((__m128i *) &in[x * 4]);
+        factorsource = _mm_shuffle_epi8(source, factormask);
+        factorsource = _mm_or_si128(factorsource, maxalpha);
 
         pix1 = _mm_unpacklo_epi8(source, zero);
-        factors = _mm_shufflelo_epi16(pix1, _MM_SHUFFLE(3, 3, 3, 3));
-        factors = _mm_shufflehi_epi16(factors, _MM_SHUFFLE(3, 3, 3, 3));
-        factors = _mm_or_si128(factors, maxalpha);
+        factors = _mm_unpacklo_epi8(factorsource, zero);
         pix1 = _mm_add_epi16(_mm_mullo_epi16(pix1, factors), half);
         pix1 = _mm_add_epi16(pix1, _mm_srli_epi16(pix1, 8));
         pix1 = _mm_srli_epi16(pix1, 8);
 
         pix2 = _mm_unpackhi_epi8(source, zero);
-        factors = _mm_shufflelo_epi16(pix2, _MM_SHUFFLE(3, 3, 3, 3));
-        factors = _mm_shufflehi_epi16(factors, _MM_SHUFFLE(3, 3, 3, 3));
-        factors = _mm_or_si128(factors, maxalpha);
+        factors = _mm_unpackhi_epi8(factorsource, zero);
         pix2 = _mm_add_epi16(_mm_mullo_epi16(pix2, factors), half);
         pix2 = _mm_add_epi16(pix2, _mm_srli_epi16(pix2, 8));
         pix2 = _mm_srli_epi16(pix2, 8);

From 4b0abfc6c99db6cbec0c1c17ef9a6355761a9f88 Mon Sep 17 00:00:00 2001
From: homm
Date: Sat, 2 Jul 2016 16:11:10 +0300
Subject: [PATCH 03/11] avx2 implementation

---
 libImaging/Convert.c | 50 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 3 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 0d634e6e905..788a00fd120 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -38,7 +38,10 @@
 #include <mmintrin.h>
 #include <emmintrin.h>
 #include <smmintrin.h>
-
+#if defined(__AVX2__)
+    #include <immintrin.h>
+#endif
+
 
 #define MAX(a, b) (a)>(b) ? (a) : (b)
 #define MIN(a, b) (a)<(b) ? (a) : (b)
 
@@ -449,10 +452,47 @@ rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
     unsigned int tmp;
     unsigned char alpha;
     int x = 0;
+
+#if defined(__AVX2__)
+
+    __m256i zero = _mm256_setzero_si256();
+    __m256i half = _mm256_set1_epi16(128);
+    __m256i maxalpha = _mm256_set_epi32(
+        0xff000000, 0xff000000, 0xff000000, 0xff000000,
+        0xff000000, 0xff000000, 0xff000000, 0xff000000);
+    __m256i factormask = _mm256_set_epi8(
+        15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3,
+        15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+    __m256i factorsource, source, pix1, pix2, factors;
+
+    for (; x < xsize - 7; x += 8) {
+        source = _mm256_loadu_si256((__m256i *) &in[x * 4]);
+        factorsource = _mm256_shuffle_epi8(source, factormask);
+        factorsource = _mm256_or_si256(factorsource, maxalpha);
+
+        pix1 = _mm256_unpacklo_epi8(source, zero);
+        factors = _mm256_unpacklo_epi8(factorsource, zero);
+        pix1 = _mm256_add_epi16(_mm256_mullo_epi16(pix1, factors), half);
+        pix1 = _mm256_add_epi16(pix1, _mm256_srli_epi16(pix1, 8));
+        pix1 = _mm256_srli_epi16(pix1, 8);
+
+        pix2 = _mm256_unpackhi_epi8(source, zero);
+        factors = _mm256_unpackhi_epi8(factorsource, zero);
+        pix2 = _mm256_add_epi16(_mm256_mullo_epi16(pix2, factors), half);
+        pix2 = _mm256_add_epi16(pix2, _mm256_srli_epi16(pix2, 8));
+        pix2 = _mm256_srli_epi16(pix2, 8);
+
+        source = _mm256_packus_epi16(pix1, pix2);
+        _mm256_storeu_si256((__m256i *) &out[x * 4], source);
+    }
+
+#else
+
     __m128i zero = _mm_setzero_si128();
     __m128i half = _mm_set1_epi16(128);
     __m128i maxalpha = _mm_set1_epi32(0xff000000);
-    __m128i factormask = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
+    __m128i factormask = _mm_set_epi8(
+        15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
     __m128i factorsource, source, pix1, pix2, factors;
 
     for (; x < xsize - 3; x += 4) {
@@ -472,8 +512,12 @@
         pix2 = _mm_add_epi16(pix2, _mm_srli_epi16(pix2, 8));
         pix2 = _mm_srli_epi16(pix2, 8);
 
-        _mm_storeu_si128((__m128i *) &out[x * 4], _mm_packus_epi16(pix1, pix2));
+        source = _mm_packus_epi16(pix1, pix2);
+        _mm_storeu_si128((__m128i *) &out[x * 4], source);
     }
+
+#endif
+
     for (; x < xsize; x++) {
         alpha = in[x * 4 + 3];
         out[x * 4 + 0] = MULDIV255(in[x * 4 + 0], alpha, tmp);
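[Note on PATCHES 01-03] The three patches above vectorize alpha premultiplication
(rgbA2rgba). As a reference for what each 16-bit lane computes, here is a scalar
sketch; the helper names are made up for the sketch, and it assumes MULDIV255 is
Pillow's usual add-128-and-fold division by 255, so treat it as an illustration
rather than the macro itself:

    #include <stdint.h>

    /* Rounding division by 255: t = c*a + 128; (t + (t >> 8)) >> 8.
       This is what the SSE/AVX loops do per 16-bit lane with half = 128
       followed by pix += pix >> 8; pix >>= 8. */
    static inline uint8_t muldiv255(uint8_t c, uint8_t a)
    {
        uint32_t t = (uint32_t) c * a + 128;
        return (uint8_t) ((t + (t >> 8)) >> 8);
    }

    /* Scalar reference for rgbA2rgba: scale R, G, B by alpha/255 and pass
       alpha through. The SIMD code ORs 0xff into the alpha position of
       `factors`, so the alpha lane is multiplied by 255 and then divided
       by 255, i.e. left unchanged. */
    static void rgbA2rgba_scalar(uint8_t *out, const uint8_t *in, int xsize)
    {
        int x;
        for (x = 0; x < xsize; x++, in += 4, out += 4) {
            out[0] = muldiv255(in[0], in[3]);
            out[1] = muldiv255(in[1], in[3]);
            out[2] = muldiv255(in[2], in[3]);
            out[3] = in[3];
        }
    }

The only difference between PATCH 01 and PATCH 02 is how `factors` is built (two
16-bit shuffles per register half versus one byte shuffle of the whole register);
PATCH 03 is the same loop widened to eight pixels per iteration under __AVX2__,
with the four-pixel SSE loop kept in the #else branch.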
From 6b40458ab063adce37726d327b5ffe9789da05df Mon Sep 17 00:00:00 2001
From: homm
Date: Sun, 29 May 2016 23:30:38 +0300
Subject: [PATCH 04/11] RGBa → RGBA convert using gather
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 libImaging/Convert.c | 54 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 52 insertions(+), 2 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 788a00fd120..0f0489f93c1 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -527,14 +527,64 @@ rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
     }
 }
 
+int *rgba2rgbAtable = NULL;
+
 /* RGBa -> RGBA conversion to remove premultiplication
    Needed for correct transforms/resizing on RGBA images */
 static void
 rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
+    int x = 0;
     unsigned int alpha;
-    for (x = 0; x < xsize; x++, in+=4) {
+
+#if defined(__AVX2__)
+
+    int a, c;
+    if ( ! rgba2rgbAtable) {
+        rgba2rgbAtable = (int *) malloc(256 * 256 * 2 * 4);
+        for (c = 0; c < 256; c++) {
+            rgba2rgbAtable[c] = c;
+        }
+        for (a = 1; a < 256; a++) {
+            for (c = 0; c < 256; c++) {
+                rgba2rgbAtable[a * 256 + c] = CLIP((255 * c) / a);
+            }
+        }
+    }
+
+    for (; x < xsize - 7; x += 8) {
+        __m256i pix0, pix1, pix2, pix3;
+        __m256i source = _mm256_loadu_si256((__m256i *) &in[x * 4]);
+
+        pix0 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
+            -1,-1,-1,3, -1,-1,3,2, -1,-1,3,1, -1,-1,3,0,
+            -1,-1,-1,3, -1,-1,3,2, -1,-1,3,1, -1,-1,3,0));
+        pix0 = _mm256_i32gather_epi32(rgba2rgbAtable, pix0, 4);
+
+        pix1 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
+            -1,-1,-1,7, -1,-1,7,6, -1,-1,7,5, -1,-1,7,4,
+            -1,-1,-1,7, -1,-1,7,6, -1,-1,7,5, -1,-1,7,4));
+        pix1 = _mm256_i32gather_epi32(rgba2rgbAtable, pix1, 4);
+
+        pix2 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
+            -1,-1,-1,11, -1,-1,11,10, -1,-1,11,9, -1,-1,11,8,
+            -1,-1,-1,11, -1,-1,11,10, -1,-1,11,9, -1,-1,11,8));
+        pix2 = _mm256_i32gather_epi32(rgba2rgbAtable, pix2, 4);
+
+        pix3 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
+            -1,-1,-1,15, -1,-1,15,14, -1,-1,15,13, -1,-1,15,12,
+            -1,-1,-1,15, -1,-1,15,14, -1,-1,15,13, -1,-1,15,12));
+        pix3 = _mm256_i32gather_epi32(rgba2rgbAtable, pix3, 4);
+
+        pix0 = _mm256_packus_epi32(pix0, pix1);
+        pix2 = _mm256_packus_epi32(pix2, pix3);
+        source = _mm256_packus_epi16(pix0, pix2);
+        _mm256_storeu_si256((__m256i *) &out[x * 4], source);
+    }
+
+#endif
+
+    for (; x < xsize; x++, in+=4) {
         alpha = in[3];
         if (alpha == 255 || alpha == 0) {
             *out++ = in[0];

From cb88da6a603e6f0242dda430bd8ab9954f2ad33f Mon Sep 17 00:00:00 2001
From: homm
Date: Tue, 16 Aug 2016 15:22:54 +0200
Subject: [PATCH 05/11] fix RGBa → RGBA conversion on AVX2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 libImaging/Convert.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 0f0489f93c1..253c4107cef 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -584,7 +584,10 @@ rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
 
 #endif
 
-    for (; x < xsize; x++, in+=4) {
+    in = &in[x * 4];
+    out = &out[x * 4];
+
+    for (; x < xsize; x++, in += 4) {
         alpha = in[3];
         if (alpha == 255 || alpha == 0) {
             *out++ = in[0];
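[Note on PATCHES 04-05] PATCH 04 removes premultiplication (rgba2rgbA) by
precomputing a 256x256 lookup table and fetching eight entries per channel at a
time with _mm256_i32gather_epi32; PATCH 05 fixes the scalar tail, which
previously restarted at the first pixel because `in` and `out` were never
advanced past the pixels the vector loop had already handled. A scalar sketch of
the table approach follows; the helper names are hypothetical and the code is an
illustration, not the patch itself:

    #include <stdint.h>
    #include <stdlib.h>

    #define CLIP8(v) ((v) < 0 ? 0 : (v) > 255 ? 255 : (v))

    /* Table indexed by alpha * 256 + channel; entry = clip(255 * c / a).
       Row 0 (alpha == 0) just passes the channel value through. */
    static int *build_unpremultiply_table(void)
    {
        int a, c;
        int *table = malloc(256 * 256 * sizeof(int));
        if (!table)
            return NULL;
        for (c = 0; c < 256; c++)
            table[c] = c;
        for (a = 1; a < 256; a++)
            for (c = 0; c < 256; c++)
                table[a * 256 + c] = CLIP8((255 * c) / a);
        return table;
    }

    /* Scalar equivalent of the gather-based loop, one pixel at a time. */
    static void rgba2rgbA_table(uint8_t *out, const uint8_t *in, int xsize,
                                const int *table)
    {
        int x;
        for (x = 0; x < xsize; x++, in += 4, out += 4) {
            out[0] = (uint8_t) table[in[3] * 256 + in[0]];
            out[1] = (uint8_t) table[in[3] * 256 + in[1]];
            out[2] = (uint8_t) table[in[3] * 256 + in[2]];
            out[3] = in[3];
        }
    }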
From c51d7797430524d74caf3c97ae9e0de6bc52ba59 Mon Sep 17 00:00:00 2001
From: Alexander
Date: Mon, 23 Jan 2017 13:52:21 +0300
Subject: [PATCH 06/11] use float div instead of gather

---
 libImaging/Convert.c | 67 ++++++++++++++++++++------------------------
 1 file changed, 31 insertions(+), 36 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 253c4107cef..9497b1dab8a 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -527,8 +527,6 @@ rgbA2rgba(UINT8* out, const UINT8* in, int xsize)
     }
 }
 
-int *rgba2rgbAtable = NULL;
-
 /* RGBa -> RGBA conversion to remove premultiplication
    Needed for correct transforms/resizing on RGBA images */
 static void
@@ -539,46 +537,43 @@ rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
 
 #if defined(__AVX2__)
 
-    int a, c;
-    if ( ! rgba2rgbAtable) {
-        rgba2rgbAtable = (int *) malloc(256 * 256 * 2 * 4);
-        for (c = 0; c < 256; c++) {
-            rgba2rgbAtable[c] = c;
-        }
-        for (a = 1; a < 256; a++) {
-            for (c = 0; c < 256; c++) {
-                rgba2rgbAtable[a * 256 + c] = CLIP((255 * c) / a);
-            }
-        }
-    }
-
     for (; x < xsize - 7; x += 8) {
-        __m256i pix0, pix1, pix2, pix3;
+        __m256 mmaf;
+        __m256i pix0, pix1, pix2, pix3, mma;
+        __m256 mma0, mma1, mma2, mma3;
+        __m256 half = _mm256_set1_ps(0.5);
         __m256i source = _mm256_loadu_si256((__m256i *) &in[x * 4]);
 
-        pix0 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
-            -1,-1,-1,3, -1,-1,3,2, -1,-1,3,1, -1,-1,3,0,
-            -1,-1,-1,3, -1,-1,3,2, -1,-1,3,1, -1,-1,3,0));
-        pix0 = _mm256_i32gather_epi32(rgba2rgbAtable, pix0, 4);
-
-        pix1 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
-            -1,-1,-1,7, -1,-1,7,6, -1,-1,7,5, -1,-1,7,4,
-            -1,-1,-1,7, -1,-1,7,6, -1,-1,7,5, -1,-1,7,4));
-        pix1 = _mm256_i32gather_epi32(rgba2rgbAtable, pix1, 4);
-
-        pix2 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
-            -1,-1,-1,11, -1,-1,11,10, -1,-1,11,9, -1,-1,11,8,
-            -1,-1,-1,11, -1,-1,11,10, -1,-1,11,9, -1,-1,11,8));
-        pix2 = _mm256_i32gather_epi32(rgba2rgbAtable, pix2, 4);
-
-        pix3 = _mm256_shuffle_epi8(source, _mm256_set_epi8(
-            -1,-1,-1,15, -1,-1,15,14, -1,-1,15,13, -1,-1,15,12,
-            -1,-1,-1,15, -1,-1,15,14, -1,-1,15,13, -1,-1,15,12));
-        pix3 = _mm256_i32gather_epi32(rgba2rgbAtable, pix3, 4);
-
+        mma = _mm256_and_si256(source, _mm256_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0,
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+
+        mmaf = _mm256_cvtepi32_ps(_mm256_srli_epi32(source, 24));
+        mmaf = _mm256_mul_ps(_mm256_set1_ps(255), _mm256_rcp_ps(mmaf));
+
+        mma0 = _mm256_shuffle_ps(mmaf, mmaf, 0x00);
+        mma1 = _mm256_shuffle_ps(mmaf, mmaf, 0x55);
+        mma2 = _mm256_shuffle_ps(mmaf, mmaf, 0xaa);
+        mma3 = _mm256_shuffle_ps(mmaf, mmaf, 0xff);
+
+        pix1 = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
+        pix3 = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
+        pix0 = _mm256_unpacklo_epi16(pix1, _mm256_setzero_si256());
+        pix1 = _mm256_unpackhi_epi16(pix1, _mm256_setzero_si256());
+        pix2 = _mm256_unpacklo_epi16(pix3, _mm256_setzero_si256());
+        pix3 = _mm256_unpackhi_epi16(pix3, _mm256_setzero_si256());
+
+        pix0 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix0), mma0), half));
+        pix1 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix1), mma1), half));
+        pix2 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix2), mma2), half));
+        pix3 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix3), mma3), half));
+
         pix0 = _mm256_packus_epi32(pix0, pix1);
         pix2 = _mm256_packus_epi32(pix2, pix3);
         source = _mm256_packus_epi16(pix0, pix2);
+        source = _mm256_blendv_epi8(source, mma, _mm256_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0,
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
         _mm256_storeu_si256((__m256i *) &out[x * 4], source);
     }
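[Note on PATCH 06] The gather is replaced by a per-pixel floating-point factor:
255 / alpha is approximated with _mm256_rcp_ps, every channel is multiplied by
it, and the original alpha byte is blended back at the end. _mm256_rcp_ps is
only accurate to roughly 12 bits, which is normally enough for 8-bit output but
can differ from exact integer division by one step in rare cases. A scalar model
of the arithmetic (a sketch with a made-up helper name, not the patch):

    #include <stdint.h>

    static void unpremultiply_float(uint8_t *out, const uint8_t *in, int xsize)
    {
        int x, i;
        for (x = 0; x < xsize; x++, in += 4, out += 4) {
            /* The SIMD code computes 255 * rcp(alpha); rcp(0) is +inf and the
               conversion plus saturating packs end up storing 0, so the scalar
               model simply special-cases alpha == 0. */
            float scale = in[3] ? 255.0f / in[3] : 0.0f;
            for (i = 0; i < 3; i++) {
                float v = in[i] * scale + 0.5f;   /* add half, then convert */
                out[i] = v > 255.0f ? 255 : (uint8_t) v;
            }
            out[3] = in[3];
        }
    }

PATCH 07 below adds the same computation as an SSE4 loop placed after the
#endif, so it serves non-AVX2 builds and also mops up the pixels left over from
the eight-wide AVX2 loop.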
From 751a6649729c814e84f2069ec9badc9f98cc6edd Mon Sep 17 00:00:00 2001
From: Alexander
Date: Mon, 23 Jan 2017 16:25:05 +0300
Subject: [PATCH 07/11] sse4 version (still 1.4x faster than previous avx2 implementation)

---
 libImaging/Convert.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 9497b1dab8a..b9ec3fe1c1d 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -579,6 +579,44 @@ rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
 
 #endif
 
+    for (; x < xsize - 3; x += 4) {
+        __m128 mmaf;
+        __m128i pix0, pix1, pix2, pix3, mma;
+        __m128 mma0, mma1, mma2, mma3;
+        __m128 half = _mm_set1_ps(0.5);
+        __m128i source = _mm_loadu_si128((__m128i *) &in[x * 4]);
+
+        mma = _mm_and_si128(source, _mm_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+
+        mmaf = _mm_cvtepi32_ps(_mm_srli_epi32(source, 24));
+        mmaf = _mm_mul_ps(_mm_set1_ps(255), _mm_rcp_ps(mmaf));
+
+        mma0 = _mm_shuffle_ps(mmaf, mmaf, 0x00);
+        mma1 = _mm_shuffle_ps(mmaf, mmaf, 0x55);
+        mma2 = _mm_shuffle_ps(mmaf, mmaf, 0xaa);
+        mma3 = _mm_shuffle_ps(mmaf, mmaf, 0xff);
+
+        pix1 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix3 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_unpacklo_epi16(pix1, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi16(pix1, _mm_setzero_si128());
+        pix2 = _mm_unpacklo_epi16(pix3, _mm_setzero_si128());
+        pix3 = _mm_unpackhi_epi16(pix3, _mm_setzero_si128());
+
+        pix0 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix0), mma0), half));
+        pix1 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix1), mma1), half));
+        pix2 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix2), mma2), half));
+        pix3 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix3), mma3), half));
+
+        pix0 = _mm_packus_epi32(pix0, pix1);
+        pix2 = _mm_packus_epi32(pix2, pix3);
+        source = _mm_packus_epi16(pix0, pix2);
+        source = _mm_blendv_epi8(source, mma, _mm_set_epi8(
+            0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
+        _mm_storeu_si128((__m128i *) &out[x * 4], source);
+    }
+
     in = &in[x * 4];
     out = &out[x * 4];

From ca72e7c35c16e29058498655c1b77b9a2547aeef Mon Sep 17 00:00:00 2001
From: Alexander
Date: Tue, 24 Jan 2017 02:46:36 +0300
Subject: [PATCH 08/11] use 16-bit arithmetic

---
 libImaging/Convert.c | 86 +++++++++++++++++++-------------------------
 1 file changed, 36 insertions(+), 50 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index b9ec3fe1c1d..1904ed66436 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -539,9 +539,8 @@ rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
     for (; x < xsize - 7; x += 8) {
         __m256 mmaf;
-        __m256i pix0, pix1, pix2, pix3, mma;
-        __m256 mma0, mma1, mma2, mma3;
-        __m256 half = _mm256_set1_ps(0.5);
+        __m256i pix0, pix1, mma;
+        __m256i mma0, mma1;
         __m256i source = _mm256_loadu_si256((__m256i *) &in[x * 4]);
 
         mma = _mm256_and_si256(source, _mm256_set_epi8(
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0,
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
 
         mmaf = _mm256_cvtepi32_ps(_mm256_srli_epi32(source, 24));
-        mmaf = _mm256_mul_ps(_mm256_set1_ps(255), _mm256_rcp_ps(mmaf));
-
-        mma0 = _mm256_shuffle_ps(mmaf, mmaf, 0x00);
-        mma1 = _mm256_shuffle_ps(mmaf, mmaf, 0x55);
-        mma2 = _mm256_shuffle_ps(mmaf, mmaf, 0xaa);
-        mma3 = _mm256_shuffle_ps(mmaf, mmaf, 0xff);
-
-        pix1 = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
-        pix3 = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
-        pix0 = _mm256_unpacklo_epi16(pix1, _mm256_setzero_si256());
-        pix1 = _mm256_unpackhi_epi16(pix1, _mm256_setzero_si256());
-        pix2 = _mm256_unpacklo_epi16(pix3, _mm256_setzero_si256());
-        pix3 = _mm256_unpackhi_epi16(pix3, _mm256_setzero_si256());
-
-        pix0 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix0), mma0), half));
-        pix1 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix1), mma1), half));
-        pix2 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix2), mma2), half));
-        pix3 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(pix3), mma3), half));
-
-        pix0 = _mm256_packus_epi32(pix0, pix1);
-        pix2 = _mm256_packus_epi32(pix2, pix3);
-        source = _mm256_packus_epi16(pix0, pix2);
+        mmaf = _mm256_mul_ps(_mm256_set1_ps(255 * 256), _mm256_rcp_ps(mmaf));
+        mma1 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_set1_ps(0.5), mmaf));
+
+        mma0 = _mm256_shuffle_epi8(mma1, _mm256_set_epi8(
+            5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0,
+            5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0));
+        mma1 = _mm256_shuffle_epi8(mma1, _mm256_set_epi8(
+            13,12,13,12, 13,12,13,12, 9,8,9,8, 9,8,9,8,
+            13,12,13,12, 13,12,13,12, 9,8,9,8, 9,8,9,8));
+
+        pix0 = _mm256_unpacklo_epi8(_mm256_setzero_si256(), source);
+        pix1 = _mm256_unpackhi_epi8(_mm256_setzero_si256(), source);
+
+        pix0 = _mm256_mulhi_epu16(pix0, mma0);
+        pix1 = _mm256_mulhi_epu16(pix1, mma1);
+
+        source = _mm256_packus_epi16(pix0, pix1);
         source = _mm256_blendv_epi8(source, mma, _mm256_set_epi8(
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0,
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
@@ -581,37 +575,29 @@
     for (; x < xsize - 3; x += 4) {
         __m128 mmaf;
-        __m128i pix0, pix1, pix2, pix3, mma;
-        __m128 mma0, mma1, mma2, mma3;
-        __m128 half = _mm_set1_ps(0.5);
+        __m128i pix0, pix1, mma;
+        __m128i mma0, mma1;
         __m128i source = _mm_loadu_si128((__m128i *) &in[x * 4]);
 
         mma = _mm_and_si128(source, _mm_set_epi8(
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
 
         mmaf = _mm_cvtepi32_ps(_mm_srli_epi32(source, 24));
-        mmaf = _mm_mul_ps(_mm_set1_ps(255), _mm_rcp_ps(mmaf));
-
-        mma0 = _mm_shuffle_ps(mmaf, mmaf, 0x00);
-        mma1 = _mm_shuffle_ps(mmaf, mmaf, 0x55);
-        mma2 = _mm_shuffle_ps(mmaf, mmaf, 0xaa);
-        mma3 = _mm_shuffle_ps(mmaf, mmaf, 0xff);
-
-        pix1 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
-        pix3 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
-        pix0 = _mm_unpacklo_epi16(pix1, _mm_setzero_si128());
-        pix1 = _mm_unpackhi_epi16(pix1, _mm_setzero_si128());
-        pix2 = _mm_unpacklo_epi16(pix3, _mm_setzero_si128());
-        pix3 = _mm_unpackhi_epi16(pix3, _mm_setzero_si128());
-
-        pix0 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix0), mma0), half));
-        pix1 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix1), mma1), half));
-        pix2 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix2), mma2), half));
-        pix3 = _mm_cvtps_epi32(_mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(pix3), mma3), half));
-
-        pix0 = _mm_packus_epi32(pix0, pix1);
-        pix2 = _mm_packus_epi32(pix2, pix3);
-        source = _mm_packus_epi16(pix0, pix2);
+        mmaf = _mm_mul_ps(_mm_set1_ps(255 * 256), _mm_rcp_ps(mmaf));
+        mma1 = _mm_cvtps_epi32(_mm_add_ps(_mm_set1_ps(0.5), mmaf));
+
+        mma0 = _mm_shuffle_epi8(mma1, _mm_set_epi8(
+            5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0));
+        mma1 = _mm_shuffle_epi8(mma1, _mm_set_epi8(
+            13,12,13,12, 13,12,13,12, 9,8,9,8, 9,8,9,8));
+
+        pix0 = _mm_unpacklo_epi8(_mm_setzero_si128(), source);
+        pix1 = _mm_unpackhi_epi8(_mm_setzero_si128(), source);
+
+        pix0 = _mm_mulhi_epu16(pix0, mma0);
+        pix1 = _mm_mulhi_epu16(pix1, mma1);
+
+        source = _mm_packus_epi16(pix0, pix1);
         source = _mm_blendv_epi8(source, mma, _mm_set_epi8(
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
         _mm_storeu_si128((__m128i *) &out[x * 4], source);
     }

From c4b801f5e41d8814ad97714513cad908fe7d73ec Mon Sep 17 00:00:00 2001
From: Alexander
Date: Thu, 10 Aug 2017 01:23:32 +0300
Subject: [PATCH 09/11] fix rounding and speedup a bit

---
 libImaging/Convert.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index 1904ed66436..b360c548da1 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -548,8 +548,8 @@ rgba2rgbA(UINT8* out, const UINT8* in, int xsize)
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
 
         mmaf = _mm256_cvtepi32_ps(_mm256_srli_epi32(source, 24));
-        mmaf = _mm256_mul_ps(_mm256_set1_ps(255 * 256), _mm256_rcp_ps(mmaf));
-        mma1 = _mm256_cvtps_epi32(_mm256_add_ps(_mm256_set1_ps(0.5), mmaf));
+        mmaf = _mm256_mul_ps(_mm256_set1_ps(255.5 * 256), _mm256_rcp_ps(mmaf));
+        mma1 = _mm256_cvtps_epi32(mmaf);
 
         mma0 = _mm256_shuffle_epi8(mma1, _mm256_set_epi8(
             5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0,
@@ -583,8 +583,8 @@
             0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0, 0xff,0,0,0));
 
         mmaf = _mm_cvtepi32_ps(_mm_srli_epi32(source, 24));
-        mmaf = _mm_mul_ps(_mm_set1_ps(255 * 256), _mm_rcp_ps(mmaf));
-        mma1 = _mm_cvtps_epi32(_mm_add_ps(_mm_set1_ps(0.5), mmaf));
+        mmaf = _mm_mul_ps(_mm_set1_ps(255.5 * 256), _mm_rcp_ps(mmaf));
+        mma1 = _mm_cvtps_epi32(mmaf);
 
         mma0 = _mm_shuffle_epi8(mma1, _mm_set_epi8(
             5,4,5,4, 5,4,5,4, 1,0,1,0, 1,0,1,0));
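[Note on PATCHES 08-09] The per-channel float multiply is replaced by 16-bit
fixed-point arithmetic: one reciprocal is computed per pixel as roughly
255.5 * 256 / alpha and kept in a 16-bit lane, each channel is positioned in the
high byte of its lane (channel << 8), and _mm_mulhi_epu16 keeps the high 16 bits
of the product, so ((channel << 8) * factor) >> 16 is approximately
channel * 255 / alpha. Folding the extra 0.5 into the constant in PATCH 09 plays
the role of adding half before the truncating shift. A scalar model, with exact
division standing in for _mm_rcp_ps and a made-up helper name (illustration
only):

    #include <stdint.h>

    static void unpremultiply_fixed(uint8_t *out, const uint8_t *in, int xsize)
    {
        int x, i;
        for (x = 0; x < xsize; x++, in += 4, out += 4) {
            uint32_t a = in[3];
            /* factor fits in 16 bits for alpha >= 1 (at most 255.5 * 256 = 65408);
               alpha == 0 saturates to 0 in the SIMD code, so do the same here. */
            uint32_t factor = a ? (uint32_t) (255.5f * 256.0f / a) : 0;
            for (i = 0; i < 3; i++) {
                uint32_t v = (((uint32_t) in[i] << 8) * factor) >> 16;
                out[i] = v > 255 ? 255 : (uint8_t) v;
            }
            out[3] = in[3];
        }
    }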
From 9940519e3bcf486c0ba50fad443a64d1a2c722d7 Mon Sep 17 00:00:00 2001
From: Alexander
Date: Sun, 10 Sep 2017 02:37:04 +0300
Subject: [PATCH 10/11] RGB → L 2.2 times faster
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 libImaging/Convert.c | 42 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 37 insertions(+), 5 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index b360c548da1..ab6b12c3a2c 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -217,10 +217,25 @@ rgb2bit(UINT8* out, const UINT8* in, int xsize)
 static void
 rgb2l(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    for (x = 0; x < xsize; x++, in += 4)
+    int x = 0;
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        pix0 = _mm_packus_epi32(pix0, pix0);
+        pix0 = _mm_packus_epi16(pix0, pix0);
+        *(UINT32*)&out[x] = _mm_cvtsi128_si32(pix0);
+    }
+    for (; x < xsize; x++, in += 4)
         /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
-        *out++ = L24(in) >> 16;
+        out[x] = L24(in) >> 16;
 }
 
 static void
@@ -426,8 +441,25 @@ rgb2rgba(UINT8* out, const UINT8* in, int xsize)
 static void
 rgba2la(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    for (x = 0; x < xsize; x++, in += 4, out += 4) {
+    int x = 0;
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16, out += 16) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        __m128i alpha = _mm_and_si128(source, _mm_set1_epi32(0xff000000));
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        pix0 = _mm_shuffle_epi8(pix0, _mm_set_epi8(
+            -1,12,12,12, -1,8,8,8, -1,4,4,4, -1,0,0,0));
+        pix0 = _mm_or_si128(pix0, alpha);
+        _mm_storeu_si128((__m128i*)out, pix0);
+    }
+    for (; x < xsize; x++, in += 4, out += 4) {
         /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
         out[0] = out[1] = out[2] = L24(in) >> 16;
         out[3] = in[3];

From 2a41d6363043837ce1a276a50fce8025f744c483 Mon Sep 17 00:00:00 2001
From: Alexander
Date: Sun, 10 Sep 2017 03:04:34 +0300
Subject: [PATCH 11/11] Speed up other 2L conversions

---
 libImaging/Convert.c | 37 +++++++++++++++++++++++++++++++++----
 1 file changed, 33 insertions(+), 4 deletions(-)

diff --git a/libImaging/Convert.c b/libImaging/Convert.c
index ab6b12c3a2c..44b6d979b7d 100644
--- a/libImaging/Convert.c
+++ b/libImaging/Convert.c
@@ -241,8 +241,24 @@ rgb2l(UINT8* out, const UINT8* in, int xsize)
 static void
 rgb2la(UINT8* out, const UINT8* in, int xsize)
 {
-    int x;
-    for (x = 0; x < xsize; x++, in += 4, out += 4) {
+    int x = 0;
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16, out += 16) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        pix0 = _mm_shuffle_epi8(pix0, _mm_set_epi8(
+            -1,12,12,12, -1,8,8,8, -1,4,4,4, -1,0,0,0));
+        pix0 = _mm_or_si128(pix0, _mm_set1_epi32(0xff000000));
+        _mm_storeu_si128((__m128i*)out, pix0);
+    }
+    for (; x < xsize; x++, in += 4, out += 4) {
         /* ITU-R Recommendation 601-2 (assuming nonlinear RGB) */
         out[0] = out[1] = out[2] = L24(in) >> 16;
         out[3] = 255;
@@ -252,9 +268,22 @@
 static void
 rgb2i(UINT8* out_, const UINT8* in, int xsize)
 {
-    int x;
+    int x = 0;
     INT32* out = (INT32*) out_;
-    for (x = 0; x < xsize; x++, in += 4)
+    __m128i coeff = _mm_set_epi16(
+        0, 3735, 19235, 9798, 0, 3735, 19235, 9798);
+    for (; x < xsize - 3; x += 4, in += 16, out += 4) {
+        __m128i pix0, pix1;
+        __m128i source = _mm_loadu_si128((__m128i*)in);
+        pix0 = _mm_unpacklo_epi8(source, _mm_setzero_si128());
+        pix1 = _mm_unpackhi_epi8(source, _mm_setzero_si128());
+        pix0 = _mm_madd_epi16(pix0, coeff);
+        pix1 = _mm_madd_epi16(pix1, coeff);
+        pix0 = _mm_hadd_epi32(pix0, pix1);
+        pix0 = _mm_srli_epi32(pix0, 15);
+        _mm_storeu_si128((__m128i*)out, pix0);
+    }
+    for (; x < xsize; x++, in += 4)
         *out++ = L24(in) >> 16;
 }
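[Note on PATCHES 10-11] The RGB to L family (rgb2l, rgb2la, rgba2la, rgb2i) is
vectorized with _mm_madd_epi16 plus _mm_hadd_epi32: after unpacking, each pixel
occupies four 16-bit lanes (R, G, B, A/pad), madd produces R*9798 + G*19235 and
B*3735 + A*0 as two 32-bit halves, and hadd adds the halves into one luma value
per pixel before the shift by 15. The coefficients are the ITU-R 601-2 weights
in 15-bit fixed point (they sum to exactly 1 << 15), roughly half of the 16-bit
weights the scalar L24 macro uses with its shift by 16 (19595, 38470, 7471 in
Pillow as far as I recall; treat those numbers as an assumption). The
coefficient vector is written as _mm_set_epi16(0, 3735, 19235, 9798, ...)
because _mm_set_epi16 lists elements from the most significant lane down. For
the LA outputs, the 32-bit luma is byte-shuffled into the three colour positions
and the alpha byte is OR-ed in (255 for rgb2la, the source alpha for rgba2la).
A small self-check of the weights:

    #include <stdio.h>

    int main(void)
    {
        static const int simd[3]   = { 9798, 19235, 3735 };   /* R, G, B */
        static const int scalar[3] = { 19595, 38470, 7471 };  /* assumed L24 weights */
        printf("simd weights sum:   %d (expect %d)\n",
               simd[0] + simd[1] + simd[2], 1 << 15);
        printf("scalar weights sum: %d (expect %d)\n",
               scalar[0] + scalar[1] + scalar[2], 1 << 16);
        /* worked example: pure white must stay 255 after the 15-bit shift */
        printf("white -> %d\n",
               (255 * simd[0] + 255 * simd[1] + 255 * simd[2]) >> 15);
        return 0;
    }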