author     Jesper Thomschutz <jesper.thomschutz@nokia.com>   2010-03-09 16:30:14 +0100
committer  Jesper Thomschutz <jesper.thomschutz@nokia.com>   2010-03-09 16:30:14 +0100
commit     f8cee12c350dfb8814d560994afe2feb7078220f (patch)
tree       2a1807c08c01a959f776e61b0b88e20e48fe2607
parent     c09f97c9887306d33e715fddc7ba96c8037e91bd (diff)
Revert "Replace the inline blend function by #define"
This reverts commit c1fe9ae25aebc8d1b9c4a7f3e67fa25ecdcbadc8.
-rw-r--r--   src/gui/painting/qdrawhelper_sse2.cpp   127
1 file changed, 67 insertions, 60 deletions
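Both helpers restored by this revert vectorize the same per-channel math: multiply an 8-bit value by an 8-bit alpha and divide by 255, approximating X/255 as (X + X/256 + 0x80) >> 8 in 16-bit lanes. A minimal scalar sketch of that trick (illustrative only, not part of this patch; the function name is made up):

    // Multiply channel x (0..255) by alpha a (0..255) and divide by 255,
    // using the shift-based approximation the SSE2 code below applies per 16-bit lane.
    static inline unsigned byte_mul_channel(unsigned x, unsigned a)
    {
        unsigned t = x * a;   // at most 255 * 255 = 65025, fits in 16 bits
        t += t >> 8;          // X + X/256
        t += 0x80;            // rounding
        return t >> 8;        // final division by 256
    }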
diff --git a/src/gui/painting/qdrawhelper_sse2.cpp b/src/gui/painting/qdrawhelper_sse2.cpp
index 6ac64d3b7a..1dba91402f 100644
--- a/src/gui/painting/qdrawhelper_sse2.cpp
+++ b/src/gui/painting/qdrawhelper_sse2.cpp
@@ -63,36 +63,36 @@ QT_BEGIN_NAMESPACE
* colorMask must have 0x00ff00ff in each 32-bit component
* half must have the value 128 (0x80) in each 16-bit component
*/
-#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
-{ \
- /* 1. separate the colors in 2 vectors so each color is on 16 bits \
- (in order to be multiplied by the alpha \
- each 32 bit of dstVectorAG are in the form 0x00AA00GG \
- each 32 bit of dstVectorRB are in the form 0x00RR00BB */\
- __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
- __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
- \
- /* 2. multiply the vectors by the alpha channel */\
- pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
- pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
- \
- /* 3. devide by 255, that's the tricky part. \
- we do it like for BYTE_MUL(), with bit shift: X/255 ~= (X + X/256 + rounding)/256 */ \
- /** so first (X + X/256 + rounding) */\
- pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
- pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
- pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
- pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
- \
- /** second devide by 256 */\
- pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
- /** for AG, we could >> 8 to divide followed by << 8 to put the \
- bytes in the correct position. By masking instead, we execute \
- only one instruction */\
- pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
- \
- /* 4. combine the 2 pairs of colors */ \
- result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
+Q_STATIC_INLINE_FUNCTION __m128i BYTE_MUL_SSE2(const __m128i pixelVector, const __m128i alphaChannel, const __m128i colorMask, const __m128i half)
+{
+ // 1. separate the colors into 2 vectors so each color is on 16 bits
+ //    (in order to be multiplied by the alpha)
+ //    each 32-bit word of pixelVectorAG is in the form 0x00AA00GG
+ //    each 32-bit word of pixelVectorRB is in the form 0x00RR00BB
+ __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8);
+ __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask);
+
+ // 2. multiply the vectors by the alpha channel
+ pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel);
+ pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel);
+
+ // 3. divide by 255, that's the tricky part.
+ //    we do it like for BYTE_MUL(), with a bit shift: X/255 ~= (X + X/256 + rounding)/256
+ //    so first compute (X + X/256 + rounding)
+ pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8));
+ pixelVectorRB = _mm_add_epi16(pixelVectorRB, half);
+ pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8));
+ pixelVectorAG = _mm_add_epi16(pixelVectorAG, half);
+
+ // then divide by 256
+ pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8);
+ // for AG, we could >> 8 to divide, followed by << 8 to put the
+ // bytes back in the correct position. By masking instead, we execute
+ // only one instruction.
+ pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG);
+
+ // 4. combine the 2 pairs of colors
+ return _mm_or_si128(pixelVectorAG, pixelVectorRB);
}
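The documented preconditions (colorMask, half, alphaChannel) are built by the callers outside the hunks shown here. A sketch of vectors that would satisfy them (assumed setup, not taken from this file; `alpha` stands for an 8-bit alpha value):

    #include <emmintrin.h>  // SSE2 intrinsics
    const __m128i colorMask    = _mm_set1_epi32(0x00ff00ff);            // 0x00ff00ff in each 32-bit component
    const __m128i half         = _mm_set1_epi16(0x80);                  // 128 in each 16-bit lane
    const __m128i alphaChannel = _mm_set1_epi32((alpha << 16) | alpha); // 0x00AA00AA in each 32-bit component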
/*
@@ -101,29 +101,34 @@ QT_BEGIN_NAMESPACE
* colorMask must have 0x00ff00ff in each 32-bit component
* half must have the value 128 (0x80) in each 16-bit component
*/
-#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
- /* interpolate AG */\
- __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
- __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
- __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
- __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
- __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha); \
- finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
- finalAG = _mm_add_epi16(finalAG, half); \
- finalAG = _mm_andnot_si128(colorMask, finalAG); \
- \
- /* interpolate RB */\
- __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
- __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
- __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
- __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
- __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha); \
- finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
- finalRB = _mm_add_epi16(finalRB, half); \
- finalRB = _mm_srli_epi16(finalRB, 8); \
- \
- /* combine */\
- result = _mm_or_si128(finalAG, finalRB); \
+Q_STATIC_INLINE_FUNCTION __m128i INTERPOLATE_PIXEL_255_SSE2(const __m128i srcVector,
+ const __m128i dstVector,
+ const __m128i alphaChannel,
+ const __m128i oneMinusAlphaChannel,
+ const __m128i colorMask,
+ const __m128i half) {
+ // interpolate AG
+ __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8);
+ __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8);
+ __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel);
+ __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel);
+ __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha);
+ finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8));
+ finalAG = _mm_add_epi16(finalAG, half);
+ finalAG = _mm_andnot_si128(colorMask, finalAG);
+
+ // interpolate RB
+ __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask);
+ __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask);
+ __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel);
+ __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel);
+ __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha);
+ finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8));
+ finalRB = _mm_add_epi16(finalRB, half);
+ finalRB = _mm_srli_epi16(finalRB, 8);
+
+ // combine
+ return _mm_or_si128(finalAG, finalRB);
}
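In scalar form, one 16-bit lane of INTERPOLATE_PIXEL_255_SSE2 computes (s*a + d*(255-a)) / 255 with the same shift-based division (illustrative sketch only, not part of the patch):

    // Blend channels s and d with weights a and 255 - a, dividing by 255.
    static inline unsigned interpolate_channel_255(unsigned s, unsigned d, unsigned a)
    {
        unsigned t = s * a + d * (255 - a);  // at most 255 * 255, fits in 16 bits
        t += t >> 8;
        t += 0x80;
        return t >> 8;
    }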
void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
@@ -160,8 +165,7 @@ void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
alphaChannel = _mm_sub_epi16(one, alphaChannel);
const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]);
- __m128i destMultipliedByOneMinusAlpha;
- BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half);
+ const __m128i destMultipliedByOneMinusAlpha = BYTE_MUL_SSE2(dstVector, alphaChannel, colorMask, half);
// result = s + d * (1-alpha)
const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha);
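This is the premultiplied-alpha source-over operator: each destination channel becomes src + dst * (255 - src_alpha) / 255, which stays within 0..255 for premultiplied input, so the non-saturating _mm_add_epi8 suffices. One channel in scalar form, reusing the byte_mul_channel sketch above (illustrative only):

    unsigned dst_out = src_channel + byte_mul_channel(dst_channel, 255 - src_alpha);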
@@ -193,15 +197,14 @@ void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
for (; x < w-3; x += 4) {
__m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) {
- BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half);
+ srcVector = BYTE_MUL_SSE2(srcVector, constAlphaVector, colorMask, half);
__m128i alphaChannel = _mm_srli_epi32(srcVector, 24);
alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16));
alphaChannel = _mm_sub_epi16(one, alphaChannel);
const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]);
- __m128i destMultipliedByOneMinusAlpha;
- BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half);
+ const __m128i destMultipliedByOneMinusAlpha = BYTE_MUL_SSE2(dstVector, alphaChannel, colorMask, half);
const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha);
_mm_storeu_si128((__m128i *)&dst[x], result);
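The constant-alpha variant above differs only in that the source pixel is first scaled by constAlphaVector, and the destination is then weighted by the already-scaled source alpha. In scalar form, reusing the byte_mul_channel sketch (illustrative only):

    unsigned s       = byte_mul_channel(src_channel, const_alpha);        // src * const_alpha / 255
    unsigned s_alpha = byte_mul_channel(src_alpha, const_alpha);          // scaled source alpha
    unsigned dst_out = s + byte_mul_channel(dst_channel, 255 - s_alpha);  // source-over as before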
@@ -249,8 +252,12 @@ void qt_blend_rgb32_on_rgb32_sse2(uchar *destPixels, int dbpl,
__m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) {
const __m128i dstVector = _mm_loadu_si128((__m128i *)&dst[x]);
- __m128i result;
- INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half);
+ const __m128i result = INTERPOLATE_PIXEL_255_SSE2(srcVector,
+ dstVector,
+ constAlphaVector,
+ oneMinusConstAlpha,
+ colorMask,
+ half);
_mm_storeu_si128((__m128i *)&dst[x], result);
}
}
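The rgb32-on-rgb32 path in this last hunk ignores per-pixel alpha and blends with the constant alpha alone; per channel it matches the interpolate_channel_255 sketch above (illustrative only):

    unsigned dst_out = interpolate_channel_255(src_channel, dst_channel, const_alpha);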