/**
 * @file lv_draw_sw_dither.c
 *
 */

/*********************
 *      INCLUDES
 *********************/
#include "lv_draw_sw_dither.h"
#include "lv_draw_sw_gradient.h"
#include "../../misc/lv_color.h"

/**********************
 *   STATIC FUNCTIONS
 **********************/


#if _DITHER_GRADIENT

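/*No dithering: convert the high resolution gradient (hmap) to the display color format
  once and cache it; the 'filled' flag makes the calls for the following scanlines of
  the same gradient no-ops.*/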
LV_ATTRIBUTE_FAST_MEM void lv_dither_none(lv_grad_t * grad, lv_coord_t x, lv_coord_t y, lv_coord_t w)
{
    LV_UNUSED(x);
    LV_UNUSED(y);
    if(grad == NULL || grad->filled) return;
    for(lv_coord_t i = 0; i < w; i++) {
        grad->map[i] = lv_color_hex(grad->hmap[i].full);
    }
    grad->filled = 1;
}

static const uint8_t dither_ordered_threshold_matrix[8 * 8] = {
    0,  48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
    8,  56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
    2,  50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
};      /*Shift by 6 to normalize*/
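/*Worked example of a lookup: for the pixel at y == 2, j == 11 the index is
  (2 & 7) * 8 + (11 & 7) == 19, i.e. matrix value 52, giving a dither offset of
  52 - 32 == 20. Since the matrix holds every value in [0..63] exactly once, the
  offsets span [-32..31] and average to -0.5 over any 8x8 tile.*/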


LV_ATTRIBUTE_FAST_MEM void lv_dither_ordered_hor(lv_grad_t * grad, lv_coord_t x, lv_coord_t y, lv_coord_t w)
{
    LV_UNUSED(x);
    /*For a horizontal gradient the whole line is already available in hmap, so ordered
      dithering is straightforward: each output pixel needs only one gradient sample
      (taken 4 pixels back, clamped at the edges) plus a position dependent threshold
      from the 8x8 matrix. Unlike error diffusion, a pixel depends on no other dithered
      pixel, so no state is carried along the line.*/

    /*Apply the threshold matrix to the whole line*/
    for(lv_coord_t j = 0; j < w; j++) {
        int8_t factor = dither_ordered_threshold_matrix[(y & 7) * 8 + (j & 7)] - 32;
        /*Clamp to the last valid index: hmap holds grad->size entries*/
        lv_color32_t tmp = grad->hmap[LV_CLAMP(0, j - 4, grad->size - 1)];
        lv_color32_t t;
        t.ch.red = LV_CLAMP(0, tmp.ch.red + factor, 255);
        t.ch.green = LV_CLAMP(0, tmp.ch.green + factor, 255);
        t.ch.blue = LV_CLAMP(0, tmp.ch.blue + factor, 255);

        grad->map[j] = lv_color_hex(t.full);
    }
}
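/*For a vertical gradient every pixel of the current line shares the same source color,
  and the threshold matrix repeats with period 8 along the line, so only 8 distinct
  dithered pixels exist per line: compute those 8 once, then replicate them.*/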
LV_ATTRIBUTE_FAST_MEM void lv_dither_ordered_ver(lv_grad_t * grad, lv_coord_t x, lv_coord_t y, lv_coord_t w)
{
    /*For vertical dithering, the error is spread on the next column (and not the next line).
      Since the renderer is scanline based, it's not obvious what could be used to perform the rendering efficiently.
      The algorithm below is based on a few assumptions:
      1. An error diffusion algorithm (like Floyd-Steinberg) would be hard to implement here, since it means that a pixel
         on column n depends on the pixel on column n - 1, and so on recursively.
      2. Instead, an ordered dithering algorithm shifts the value a bit, but the influence only spreads over the matrix
         size (8x8 is used here).
      3. It means that a pixel (i, j) only depends on the values of pixels (i-7, j-7) to (i, j) and no other one.
      Then we compute a complete row of ordered dither and store it in the output.*/

    /*Extract the patch to work with, selected pseudo-randomly (clamped to the last valid hmap index)*/
    lv_color32_t tmp = grad->hmap[LV_CLAMP(0, y - 4, grad->size - 1)];

    /*Then apply the algorithm for this patch*/
    for(lv_coord_t j = 0; j < 8; j++) {
        int8_t factor = dither_ordered_threshold_matrix[(y & 7) * 8 + ((j + x) & 7)] - 32;
        lv_color32_t t;
        t.ch.red = LV_CLAMP(0, tmp.ch.red + factor, 255);
        t.ch.green = LV_CLAMP(0, tmp.ch.green + factor, 255);
        t.ch.blue = LV_CLAMP(0, tmp.ch.blue + factor, 255);

        grad->map[j] = lv_color_hex(t.full);
    }
    /*Finally fill the line by replicating the 8 computed pixels*/
    lv_coord_t j = 8;
    for(; j < w - 8; j += 8) {
        lv_memcpy(grad->map + j, grad->map, 8 * sizeof(*grad->map));
    }
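    /*Worked example of the bounds: for w == 21 the loop above runs for j == 8 only
      (16 is not < 21 - 8), copying pixels 8..15; pixels 16..20 are then filled one by
      one below. The conservative `j < w - 8` bound guarantees the memcpy never writes
      past map[w - 1].*/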
    /*Finish pixel by pixel to prevent writing past the end of the map*/
    for(; j < w; j++) {
        grad->map[j] = grad->map[j & 7];
    }
}


#if LV_DITHER_ERROR_DIFFUSION == 1
LV_ATTRIBUTE_FAST_MEM void lv_dither_err_diff_hor(lv_grad_t * grad, lv_coord_t xs, lv_coord_t y, lv_coord_t w)
{
    LV_UNUSED(xs);
    LV_UNUSED(y);
    LV_UNUSED(w);

    /*Implement the Floyd-Steinberg algorithm, see https://surma.dev/things/ditherpunk/
      Coefs are:                          x           7
                              3           5           1
                              (all divided by 16)
      Can be implemented as:              x           (x<<3 - x)
                              (x<<2 - x)  (x<<2 + x)  x
    */
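    /*Buffer roles in the loop below: next_px_err carries the 7/16 share to the next pixel
      of the current line, grad->error_acc[] accumulates the 3/16, 5/16 and 1/16 shares for
      the same columns of the next scanline, and next_l snapshots error_acc[x + 1] before
      this line overwrites it, so the value left by the previous line is still consumed.*/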
    int coef[4] = {0, 0, 0, 0};
#define FS_COMPUTE_ERROR(e) { coef[0] = (e<<3) - e; coef[1] = (e<<2) - e; coef[2] = (e<<2) + e; coef[3] = e; }
#define FS_COMPONENTS(A, OP, B, C) A.ch.red = LV_CLAMP(0, A.ch.red OP B.r OP C.r, 255); A.ch.green = LV_CLAMP(0, A.ch.green OP B.g OP C.g, 255); A.ch.blue = LV_CLAMP(0, A.ch.blue OP B.b OP C.b, 255);
#define FS_QUANT_ERROR(e, t, q) { lv_color32_t u; u.full = lv_color_to32(q); e.r = (int8_t)(t.ch.red - u.ch.red); e.g = (int8_t)(t.ch.green - u.ch.green); e.b = (int8_t)(t.ch.blue - u.ch.blue); }
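    /*Worked example with a hypothetical per-channel quantization error e == 10:
      FS_COMPUTE_ERROR yields coef == {70, 30, 50, 10}, i.e. the 7/16, 3/16, 5/16 and 1/16
      shares scaled by 16. After the >> 4 below the right neighbour receives 70 >> 4 == 4,
      and the next line receives 1, 3 and 0. The shares sum to exactly 16 * e, though the
      truncating shift drops part of small errors.*/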
    lv_scolor24_t next_px_err = {0}, next_l = {0}, error;
    /*First and last pixels are not dithered*/
    grad->map[0] = lv_color_hex(grad->hmap[0].full);
    for(lv_coord_t x = 1; x < grad->size - 1; x++) {
        lv_color32_t t = grad->hmap[x];
        lv_color_t q;
        /*Add the error terms: from the previous pixel of this line and from the previous line*/
        FS_COMPONENTS(t, +, next_px_err, next_l);
        next_l = grad->error_acc[x + 1];
        /*Quantize*/
        q = lv_color_hex(t.full);
        /*Then compute the quantization error*/
        FS_QUANT_ERROR(error, t, q);
        /*Dither the error*/
        FS_COMPUTE_ERROR(error.r);
        next_px_err.r = coef[0] >> 4;
        grad->error_acc[x - 1].r += coef[1] >> 4;
        grad->error_acc[x].r += coef[2] >> 4;
        grad->error_acc[x + 1].r = coef[3] >> 4;

        FS_COMPUTE_ERROR(error.g);
        next_px_err.g = coef[0] >> 4;
        grad->error_acc[x - 1].g += coef[1] >> 4;
        grad->error_acc[x].g += coef[2] >> 4;
        grad->error_acc[x + 1].g = coef[3] >> 4;

        FS_COMPUTE_ERROR(error.b);
        next_px_err.b = coef[0] >> 4;
        grad->error_acc[x - 1].b += coef[1] >> 4;
        grad->error_acc[x].b += coef[2] >> 4;
        grad->error_acc[x + 1].b = coef[3] >> 4;

        grad->map[x] = q;
    }
    grad->map[grad->size - 1] = lv_color_hex(grad->hmap[grad->size - 1].full);
}

LV_ATTRIBUTE_FAST_MEM void lv_dither_err_diff_ver(lv_grad_t * grad, lv_coord_t xs, lv_coord_t y, lv_coord_t w)
{
    /*Try to implement error diffusion on a vertical gradient and a horizontal map using these tricks:
      Since the given hi-resolution gradient (in src) is vertical, the Floyd-Steinberg algorithm pass needs to be rotated,
      so we get this instead (from top to bottom):

           A  B  C
       1  [ ][ ][ ]
       2  [ ][ ][ ]    Pixel A2 will spread its error on pixel A3 with coefficient 7,
       3  [ ][ ][ ]    Pixel A2 will spread its error on pixel B1 with coefficient 3, B2 with coefficient 5 and B3 with coefficient 1

      When taking into account an arbitrary pixel P(i,j), its added error diffusion term is:
      e(i,j) = 1/16 * [ e(i-1,j) * 5 + e(i-1,j+1) * 3 + e(i-1,j-1) * 1 + e(i,j-1) * 7 ]

      This means that the error term depends on pixels W, NW, N and SW.
      If we consider that we are generating the error-diffused gradient map from top to bottom, we can remember the previous
      line (N, NW) in the term above. We also remember the (W) term, since we are computing the gradient map from left to right.
      However, the SW term is painful for us: we can't support it (since to get it, we would need its own SW term, and so on).
      Let's remove it and re-dispatch its weight so the coefficients stay normalized: the removed 3/16 is absorbed by the
      N and W terms (5 -> 6 and 7 -> 9), keeping the sum at 16/16:
      e(i,j) ~= 1/16 * [ e(i-1,j) * 6 + e(i-1,j-1) * 1 + e(i,j-1) * 9 ]

      That's the idea of this pseudo Floyd-Steinberg dithering.*/
#define FS_APPLY(d, s, c) d.r = (int8_t)((s.r * c) >> 4); d.g = (int8_t)((s.g * c) >> 4); d.b = (int8_t)((s.b * c) >> 4);
#define FS_COMPONENTS3(A, OP, B, b, C, c, D, d) \
    A.ch.red = LV_CLAMP(0, A.ch.red OP ((B.r * b OP C.r * c OP D.r * d) >> 4), 255); \
    A.ch.green = LV_CLAMP(0, A.ch.green OP ((B.g * b OP C.g * c OP D.g * d) >> 4), 255); \
    A.ch.blue = LV_CLAMP(0, A.ch.blue OP ((B.b * b OP C.b * c OP D.b * d) >> 4), 255);

    lv_scolor24_t next_px_err, prev_l = grad->error_acc[0];
    /*Compute the error term for the first pixel (the very first pixel of the map is never dithered)*/
    if(xs == 0) {
        grad->map[0] = lv_color_hex(grad->hmap[y].full);
        FS_QUANT_ERROR(next_px_err, grad->hmap[y], grad->map[0]);
    }
    else {
        lv_color_t tmp = lv_color_hex(grad->hmap[y].full);
        lv_color32_t t = grad->hmap[y];
        FS_QUANT_ERROR(next_px_err, grad->hmap[y], tmp);
        FS_COMPONENTS3(t, +, next_px_err, 6, prev_l, 1, grad->error_acc[0], 9);
        grad->map[0] = lv_color_hex(t.full);
    }

    for(lv_coord_t x = 1; x < w; x++) {
        lv_color32_t t = grad->hmap[y];
        lv_color_t q;
        /*Add the current error term*/
        FS_COMPONENTS3(t, +, next_px_err, 6, prev_l, 1, grad->error_acc[x], 9);
        prev_l = grad->error_acc[x];
        /*Quantize and compute the error term*/
        q = lv_color_hex(t.full);
        FS_QUANT_ERROR(next_px_err, t, q);
        /*Store the error for the next line's computation*/
        grad->error_acc[x] = next_px_err;
        grad->map[x] = q;
    }
}
#endif /*LV_DITHER_ERROR_DIFFUSION == 1*/
#endif /*_DITHER_GRADIENT*/
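
/*A minimal usage sketch (hypothetical caller, for illustration only; the real entry point
  lives in the software renderer and helper names may differ across LVGL versions):

      lv_grad_t * grad = lv_gradient_get(&grad_dsc, obj_w, obj_h);   // cached gradient
      for(lv_coord_t y = 0; y < obj_h; y++) {
          lv_dither_ordered_hor(grad, 0, y, obj_w);   // fill grad->map for this scanline
          // ...blend grad->map[0..obj_w - 1] into the frame buffer...
      }
      lv_gradient_cleanup(grad);
*/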