/*
 * Copyright (c) 2021 - 2024 the ThorVG project. All rights reserved.

 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:

 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.

 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "../../lv_conf_internal.h"
#if LV_USE_THORVG_INTERNAL

#ifdef THORVG_AVX_VECTOR_SUPPORT

#include <immintrin.h>

#define N_32BITS_IN_128REG 4    //four 32-bit lanes in a 128-bit SSE register
#define N_32BITS_IN_256REG 8    //eight 32-bit lanes in a 256-bit AVX register

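/* ALPHA_BLEND multiplies each 8-bit channel of the four packed pixels in c by the
 * matching 8-bit alpha in a. Division by 255 is approximated by adding a bias of
 * 255 and shifting right by 8 (i.e. dividing by 256). A scalar sketch of the
 * per-channel math (illustrative only, not part of the build):
 *
 *     blended = (channel * alpha + 255) >> 8;    //~= channel * alpha / 255
 *
 * e.g. channel = 0xff, alpha = 0x80 -> (0xff * 0x80 + 0xff) >> 8 = 0x80. */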
static inline __m128i ALPHA_BLEND(__m128i c, __m128i a)
{
    //1. set the masks for the A/G and R/B channels
    auto AG = _mm_set1_epi32(0xff00ff00);
    auto RB = _mm_set1_epi32(0x00ff00ff);

    //2. mask the alpha vector - originally quartet [a, a, a, a]
    auto aAG = _mm_and_si128(a, AG);
    auto aRB = _mm_and_si128(a, RB);

    //3. calculate the alpha blending of the 2nd and 4th channels:
    //- mask the color vector
    //- multiply it by the masked alpha vector
    //- add the correction to compensate for dividing by 256 instead of by 255
    //- shift right by 8 bits - corresponding to the division by 256
    auto even = _mm_and_si128(c, RB);
    even = _mm_mullo_epi16(even, aRB);
    even = _mm_add_epi16(even, RB);
    even = _mm_srli_epi16(even, 8);

    //4. calculate the alpha blending of the 1st and 3rd channels:
    //- mask the color vector
    //- multiply it by the corresponding masked alpha vector and store the high bits of the result
    //- add the correction to compensate for dividing by 256 instead of by 255
    //- mask off the low 8 bits to mimic the division by 256
    auto odd = _mm_and_si128(c, AG);
    odd = _mm_mulhi_epu16(odd, aAG);
    odd = _mm_add_epi16(odd, RB);
    odd = _mm_and_si128(odd, AG);

    //5. the final result
    return _mm_or_si128(odd, even);
}


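/* avxRasterGrayscale8 fills len 8-bit pixels at dst + offset with val: 32 bytes per
 * unaligned 256-bit AVX store, then a scalar loop for the remaining tail. Note that
 * len must stay signed (int32_t): with an unsigned length, the vector loop's
 * len - 32 bound would wrap for short runs and overrun the buffer. */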
static void avxRasterGrayscale8(uint8_t* dst, uint8_t val, uint32_t offset, int32_t len)
{
    dst += offset;

    __m256i vecVal = _mm256_set1_epi8(val);

    int32_t i = 0;
    for (; i <= len - 32; i += 32) {
        _mm256_storeu_si256((__m256i*)(dst + i), vecVal);
    }

    for (; i < len; ++i) {
        dst[i] = val;
    }
}


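/* avxRasterPixel32 fills len 32-bit pixels at dst + offset with val, eight pixels
 * per unaligned 256-bit AVX store, then finishes the leftovers one pixel at a time. */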
static void avxRasterPixel32(uint32_t *dst, uint32_t val, uint32_t offset, int32_t len)
{
    //1. calculate how many iterations we need to cover the length
    uint32_t iterations = len / N_32BITS_IN_256REG;
    uint32_t avxFilled = iterations * N_32BITS_IN_256REG;

    //2. set the beginning of the array
    dst += offset;

    //3. fill the octets
    for (uint32_t i = 0; i < iterations; ++i, dst += N_32BITS_IN_256REG) {
        _mm256_storeu_si256((__m256i*)dst, _mm256_set1_epi32(val));
    }

    //4. fill the leftovers (the loop above has already advanced dst past the avx-filled pixels)
    int32_t leftovers = len - avxFilled;
    while (leftovers--) *dst++ = val;
}


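/* avxRasterTranslucentRect blends a constant color into a rectangular region using
 * source-over compositing (the color is assumed to be premultiplied by its alpha),
 * per pixel (a scalar sketch of the math):
 *
 *     dst = color + ALPHA_BLEND(dst, 255 - a);    //dst = color + dst * (255 - a) / 255
 *
 * The 32-bit path processes each row in three steps: a scalar head up to 16-byte
 * alignment, N_32BITS_IN_128REG pixels at a time with SSE, then a scalar tail.
 * The 8-bit grayscale path is a plain scalar fallback. */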
static bool avxRasterTranslucentRect(SwSurface* surface, const SwBBox& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    auto h = static_cast<uint32_t>(region.max.y - region.min.y);
    auto w = static_cast<uint32_t>(region.max.x - region.min.x);

    //32bit channels
    if (surface->channelSize == sizeof(uint32_t)) {
        auto color = surface->join(r, g, b, a);
        auto buffer = surface->buf32 + (region.min.y * surface->stride) + region.min.x;

        uint32_t ialpha = 255 - a;

        auto avxColor = _mm_set1_epi32(color);
        auto avxIalpha = _mm_set1_epi8(ialpha);

        for (uint32_t y = 0; y < h; ++y) {
            auto dst = &buffer[y * surface->stride];

            //1. fill the not aligned memory (for 128-bit registers a 16-byte alignment is required)
            auto notAligned = ((uintptr_t)dst & 0xf) / 4;
            if (notAligned) {
                notAligned = (N_32BITS_IN_128REG - notAligned > w ? w : N_32BITS_IN_128REG - notAligned);
                for (uint32_t x = 0; x < notAligned; ++x, ++dst) {
                    *dst = color + ALPHA_BLEND(*dst, ialpha);
                }
            }

            //2. fill the aligned memory - N_32BITS_IN_128REG pixels processed at once
            uint32_t iterations = (w - notAligned) / N_32BITS_IN_128REG;
            uint32_t avxFilled = iterations * N_32BITS_IN_128REG;
            auto avxDst = (__m128i*)dst;
            for (uint32_t x = 0; x < iterations; ++x, ++avxDst) {
                *avxDst = _mm_add_epi32(avxColor, ALPHA_BLEND(*avxDst, avxIalpha));
            }

            //3. fill the remaining pixels
            int32_t leftovers = w - notAligned - avxFilled;
            dst += avxFilled;
            while (leftovers--) {
                *dst = color + ALPHA_BLEND(*dst, ialpha);
                dst++;
            }
        }
    //8bit grayscale
    } else if (surface->channelSize == sizeof(uint8_t)) {
        TVGLOG("SW_ENGINE", "Require AVX Optimization, Channel Size = %d", surface->channelSize);
        auto buffer = surface->buf8 + (region.min.y * surface->stride) + region.min.x;
        uint8_t ialpha = ~a;    //keep the inverted alpha in 8 bits so it stays 255 - a
        for (uint32_t y = 0; y < h; ++y) {
            auto dst = &buffer[y * surface->stride];
            for (uint32_t x = 0; x < w; ++x, ++dst) {
                *dst = a + MULTIPLY(*dst, ialpha);
            }
        }
    }
    return true;
}


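/* avxRasterTranslucentRle applies the same blend along RLE spans: each span first
 * scales the source color by its coverage (when coverage < 255), then reuses the
 * head/aligned/tail split from avxRasterTranslucentRect on the span pixels. */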
static bool avxRasterTranslucentRle(SwSurface* surface, const SwRle* rle, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    auto span = rle->spans;

    //32bit channels
    if (surface->channelSize == sizeof(uint32_t)) {
        auto color = surface->join(r, g, b, a);
        uint32_t src;

        for (uint32_t i = 0; i < rle->size; ++i) {
            auto dst = &surface->buf32[span->y * surface->stride + span->x];

            if (span->coverage < 255) src = ALPHA_BLEND(color, span->coverage);
            else src = color;

            auto ialpha = IA(src);

            //1. fill the not aligned memory (for 128-bit registers a 16-byte alignment is required)
            auto notAligned = ((uintptr_t)dst & 0xf) / 4;
            if (notAligned) {
                notAligned = (N_32BITS_IN_128REG - notAligned > span->len ? span->len : N_32BITS_IN_128REG - notAligned);
                for (uint32_t x = 0; x < notAligned; ++x, ++dst) {
                    *dst = src + ALPHA_BLEND(*dst, ialpha);
                }
            }

            //2. fill the aligned memory using avx - N_32BITS_IN_128REG pixels processed at once
            //to avoid unnecessary avx variable declarations, check first whether there are any iterations at all
            uint32_t iterations = (span->len - notAligned) / N_32BITS_IN_128REG;
            uint32_t avxFilled = 0;
            if (iterations > 0) {
                auto avxSrc = _mm_set1_epi32(src);
                auto avxIalpha = _mm_set1_epi8(ialpha);

                avxFilled = iterations * N_32BITS_IN_128REG;
                auto avxDst = (__m128i*)dst;
                for (uint32_t x = 0; x < iterations; ++x, ++avxDst) {
                    *avxDst = _mm_add_epi32(avxSrc, ALPHA_BLEND(*avxDst, avxIalpha));
                }
            }

            //3. fill the remaining pixels
            int32_t leftovers = span->len - notAligned - avxFilled;
            dst += avxFilled;
            while (leftovers--) {
                *dst = src + ALPHA_BLEND(*dst, ialpha);
                dst++;
            }

            ++span;
        }
    //8bit grayscale
    } else if (surface->channelSize == sizeof(uint8_t)) {
        TVGLOG("SW_ENGINE", "Require AVX Optimization, Channel Size = %d", surface->channelSize);
        uint8_t src;
        for (uint32_t i = 0; i < rle->size; ++i, ++span) {
            auto dst = &surface->buf8[span->y * surface->stride + span->x];
            if (span->coverage < 255) src = MULTIPLY(span->coverage, a);
            else src = a;
            uint8_t ialpha = ~a;    //keep the inverted alpha in 8 bits so it stays 255 - a
            for (uint32_t x = 0; x < span->len; ++x, ++dst) {
                *dst = src + MULTIPLY(*dst, ialpha);
            }
        }
    }
    return true;
}


#endif

#endif /* LV_USE_THORVG_INTERNAL */