path: root/ksvg/impl/libs/art_support
Diffstat (limited to 'ksvg/impl/libs/art_support')
-rw-r--r--  ksvg/impl/libs/art_support/art_misc.c      | 110
-rw-r--r--  ksvg/impl/libs/art_support/art_misc.h      |   4
-rw-r--r--  ksvg/impl/libs/art_support/art_rgba_svp.c  |  58
-rw-r--r--  ksvg/impl/libs/art_support/art_rgba_svp.h  |   6
4 files changed, 89 insertions, 89 deletions
diff --git a/ksvg/impl/libs/art_support/art_misc.c b/ksvg/impl/libs/art_support/art_misc.c
index 69b45306..6645fe2d 100644
--- a/ksvg/impl/libs/art_support/art_misc.c
+++ b/ksvg/impl/libs/art_support/art_misc.c
@@ -464,7 +464,7 @@ struct _ksvgArtRgbAffineClipAlphaData
int src_width;
int src_height;
int src_rowstride;
- const art_u8 *mask;
+ const art_u8 *tqmask;
int y0;
};
@@ -592,7 +592,7 @@ ksvg_art_rgb_affine_clip_callback (void *callback_data, int y,
}
static
-void ksvg_art_rgb_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6],
+void ksvg_art_rgb_affine_clip_tqmask_run(art_u8 *dst_p, const art_u8 *tqmask, int x0, int x1, int y, const double inv[6],
int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height)
{
const art_u8 *src_p;
@@ -626,7 +626,7 @@ void ksvg_art_rgb_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0
srcAlpha = alpha * src_p[3] + 0x80;
srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8;
- srcAlpha = (srcAlpha * *mask++) + 0x80;
+ srcAlpha = (srcAlpha * *tqmask++) + 0x80;
srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8;
d = *dst_p;
@@ -656,13 +656,13 @@ void ksvg_art_rgb_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0
else
{
dst_p += 3;
- mask++;
+ tqmask++;
}
}
}
static void
-ksvg_art_rgb_affine_clip_mask_callback (void *callback_data, int y,
+ksvg_art_rgb_affine_clip_tqmask_callback (void *callback_data, int y,
int start, ArtSVPRenderAAStep *steps, int n_steps)
{
ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data;
@@ -673,14 +673,14 @@ ksvg_art_rgb_affine_clip_mask_callback (void *callback_data, int y,
int k;
int *alphatab;
int alpha;
- const art_u8 *maskbuf;
+ const art_u8 *tqmaskbuf;
linebuf = data->dst;
x0 = data->x0;
x1 = data->x1;
alphatab = data->alphatab;
- maskbuf = data->mask + (y - data->y0) * (x1 - x0);
+ tqmaskbuf = data->tqmask + (y - data->y0) * (x1 - x0);
if(n_steps > 0)
{
@@ -689,7 +689,7 @@ ksvg_art_rgb_affine_clip_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_affine_clip_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_affine_clip_tqmask_run(linebuf, tqmaskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
for(k = 0; k < n_steps - 1; k++)
@@ -701,7 +701,7 @@ ksvg_art_rgb_affine_clip_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_affine_clip_mask_run(linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_affine_clip_tqmask_run(linebuf + (run_x0 - x0) * 3, tqmaskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
running_sum += steps[k].delta;
@@ -709,14 +709,14 @@ ksvg_art_rgb_affine_clip_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_affine_clip_mask_run(linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_affine_clip_tqmask_run(linebuf + (run_x1 - x0) * 3, tqmaskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
else
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_affine_clip_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_affine_clip_tqmask_run(linebuf, tqmaskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
data->dst += data->dst_rowstride;
@@ -853,7 +853,7 @@ ksvg_art_rgba_affine_clip_callback (void *callback_data, int y,
}
static
-void ksvg_art_rgba_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6],
+void ksvg_art_rgba_affine_clip_tqmask_run(art_u8 *dst_p, const art_u8 *tqmask, int x0, int x1, int y, const double inv[6],
int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height)
{
const art_u8 *src_p;
@@ -887,7 +887,7 @@ void ksvg_art_rgba_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x
srcAlpha = alpha * src_p[3] + 0x80;
srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8;
- srcAlpha = (srcAlpha * *mask++) + 0x80;
+ srcAlpha = (srcAlpha * *tqmask++) + 0x80;
srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8;
d = *dst_p;
@@ -924,13 +924,13 @@ void ksvg_art_rgba_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x
else
{
dst_p += 4;
- mask++;
+ tqmask++;
}
}
}
static void
-ksvg_art_rgba_affine_clip_mask_callback (void *callback_data, int y,
+ksvg_art_rgba_affine_clip_tqmask_callback (void *callback_data, int y,
int start, ArtSVPRenderAAStep *steps, int n_steps)
{
ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data;
@@ -941,14 +941,14 @@ ksvg_art_rgba_affine_clip_mask_callback (void *callback_data, int y,
int k;
int *alphatab;
int alpha;
- const art_u8 *maskbuf;
+ const art_u8 *tqmaskbuf;
linebuf = data->dst;
x0 = data->x0;
x1 = data->x1;
alphatab = data->alphatab;
- maskbuf = data->mask + (y - data->y0) * (x1 - x0);
+ tqmaskbuf = data->tqmask + (y - data->y0) * (x1 - x0);
if(n_steps > 0)
{
@@ -957,7 +957,7 @@ ksvg_art_rgba_affine_clip_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_affine_clip_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_affine_clip_tqmask_run(linebuf, tqmaskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
for(k = 0; k < n_steps - 1; k++)
@@ -969,7 +969,7 @@ ksvg_art_rgba_affine_clip_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_affine_clip_mask_run(linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_affine_clip_tqmask_run(linebuf + (run_x0 - x0) * 4, tqmaskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
running_sum += steps[k].delta;
@@ -977,14 +977,14 @@ ksvg_art_rgba_affine_clip_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_affine_clip_mask_run(linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_affine_clip_tqmask_run(linebuf + (run_x1 - x0) * 4, tqmaskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
else
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_affine_clip_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_affine_clip_tqmask_run(linebuf, tqmaskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
data->dst += data->dst_rowstride;
@@ -1030,7 +1030,7 @@ void ksvg_art_rgb_affine_clip(const ArtSVP *svp, art_u8 *dst, int x0, int y0, in
const art_u8 *src,
int src_width, int src_height, int src_rowstride,
const double affine[6],
- int alpha, const art_u8 *mask)
+ int alpha, const art_u8 *tqmask)
{
ksvgArtRgbAffineClipAlphaData data;
int i;
@@ -1052,7 +1052,7 @@ void ksvg_art_rgb_affine_clip(const ArtSVP *svp, art_u8 *dst, int x0, int y0, in
data.x0 = x0;
data.x1 = x1;
data.y0 = y0;
- data.mask = mask;
+ data.tqmask = tqmask;
art_affine_invert(data.inv, affine);
@@ -1063,15 +1063,15 @@ void ksvg_art_rgb_affine_clip(const ArtSVP *svp, art_u8 *dst, int x0, int y0, in
if(dst_channels == 3)
{
- if(mask)
- art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_affine_clip_mask_callback, &data);
+ if(tqmask)
+ art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_affine_clip_tqmask_callback, &data);
else
art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_affine_clip_callback, &data);
}
else
{
- if(mask)
- art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_affine_clip_mask_callback, &data);
+ if(tqmask)
+ art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_affine_clip_tqmask_callback, &data);
else
art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_affine_clip_callback, &data);
}
@@ -1228,7 +1228,7 @@ ksvg_art_rgb_texture_callback (void *callback_data, int y,
}
static
-void ksvg_art_rgb_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6],
+void ksvg_art_rgb_texture_tqmask_run(art_u8 *dst_p, const art_u8 *tqmask, int x0, int x1, int y, const double inv[6],
int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height)
{
const art_u8 *src_p;
@@ -1277,11 +1277,11 @@ void ksvg_art_rgb_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, in
/* Pattern source is in RGBA format, premultiplied.
* alpha represents fill/stroke/group opacity.
*
- * Multiply source alpha by 'alpha' and mask value then composite over.
- * For each channel, d = d + alpha * mask * (s - srcAlpha * d).
+ * Multiply source alpha by 'alpha' and tqmask value then composite over.
+ * For each channel, d = d + alpha * tqmask * (s - srcAlpha * d).
*/
- am = (alpha * *mask++) + 0x80;
+ am = (alpha * *tqmask++) + 0x80;
am = (am + (am >> 8)) >> 8;
srcAlpha = src_p[3];
@@ -1322,7 +1322,7 @@ void ksvg_art_rgb_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, in
}
static void
-ksvg_art_rgb_texture_mask_callback (void *callback_data, int y,
+ksvg_art_rgb_texture_tqmask_callback (void *callback_data, int y,
int start, ArtSVPRenderAAStep *steps, int n_steps)
{
ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data;
@@ -1333,7 +1333,7 @@ ksvg_art_rgb_texture_mask_callback (void *callback_data, int y,
int k;
int *alphatab;
int alpha;
- const art_u8 *maskbuf;
+ const art_u8 *tqmaskbuf;
linebuf = data->dst;
x0 = data->x0;
@@ -1341,7 +1341,7 @@ ksvg_art_rgb_texture_mask_callback (void *callback_data, int y,
alphatab = data->alphatab;
- maskbuf = data->mask + (y - data->y0) * (x1 - x0);
+ tqmaskbuf = data->tqmask + (y - data->y0) * (x1 - x0);
if(n_steps > 0)
{
@@ -1350,7 +1350,7 @@ ksvg_art_rgb_texture_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_texture_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_texture_tqmask_run(linebuf, tqmaskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
for(k = 0; k < n_steps - 1; k++)
@@ -1362,7 +1362,7 @@ ksvg_art_rgb_texture_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_texture_mask_run(linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_texture_tqmask_run(linebuf + (run_x0 - x0) * 3, tqmaskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
running_sum += steps[k].delta;
@@ -1370,14 +1370,14 @@ ksvg_art_rgb_texture_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_texture_mask_run(linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_texture_tqmask_run(linebuf + (run_x1 - x0) * 3, tqmaskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
else
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgb_texture_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgb_texture_tqmask_run(linebuf, tqmaskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
data->dst += data->dst_rowstride;
@@ -1545,7 +1545,7 @@ ksvg_art_rgba_texture_callback (void *callback_data, int y,
}
static
-void ksvg_art_rgba_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6],
+void ksvg_art_rgba_texture_tqmask_run(art_u8 *dst_p, const art_u8 *tqmask, int x0, int x1, int y, const double inv[6],
int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height)
{
const art_u8 *src_p;
@@ -1594,11 +1594,11 @@ void ksvg_art_rgba_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, i
/* Pattern source is in RGBA format, premultiplied.
* alpha represents fill/stroke/group opacity.
*
- * Multiply source alpha by 'alpha' and mask value then composite over.
- * For each channel, d = d + alpha * mask * (s - srcAlpha * d).
+ * Multiply source alpha by 'alpha' and tqmask value then composite over.
+ * For each channel, d = d + alpha * tqmask * (s - srcAlpha * d).
*/
- am = (alpha * *mask++) + 0x80;
+ am = (alpha * *tqmask++) + 0x80;
am = (am + (am >> 8)) >> 8;
srcAlpha = src_p[3];
@@ -1636,7 +1636,7 @@ void ksvg_art_rgba_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, i
*dst_p++ = d + tmp2;
- /* dstAlpha = dstAlpha + srcAlpha * alpha * mask * (1 - dstAlpha) */
+ /* dstAlpha = dstAlpha + srcAlpha * alpha * tqmask * (1 - dstAlpha) */
d = *dst_p;
tmp = srcAlpha * am + 0x80;
@@ -1651,7 +1651,7 @@ void ksvg_art_rgba_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, i
}
static void
-ksvg_art_rgba_texture_mask_callback (void *callback_data, int y,
+ksvg_art_rgba_texture_tqmask_callback (void *callback_data, int y,
int start, ArtSVPRenderAAStep *steps, int n_steps)
{
ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data;
@@ -1662,7 +1662,7 @@ ksvg_art_rgba_texture_mask_callback (void *callback_data, int y,
int k;
int *alphatab;
int alpha;
- const art_u8 *maskbuf;
+ const art_u8 *tqmaskbuf;
linebuf = data->dst;
x0 = data->x0;
@@ -1670,7 +1670,7 @@ ksvg_art_rgba_texture_mask_callback (void *callback_data, int y,
alphatab = data->alphatab;
- maskbuf = data->mask + (y - data->y0) * (x1 - x0);
+ tqmaskbuf = data->tqmask + (y - data->y0) * (x1 - x0);
if(n_steps > 0)
{
@@ -1679,7 +1679,7 @@ ksvg_art_rgba_texture_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_texture_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_texture_tqmask_run(linebuf, tqmaskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
for(k = 0; k < n_steps - 1; k++)
@@ -1691,7 +1691,7 @@ ksvg_art_rgba_texture_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_texture_mask_run(linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_texture_tqmask_run(linebuf + (run_x0 - x0) * 4, tqmaskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
running_sum += steps[k].delta;
@@ -1699,14 +1699,14 @@ ksvg_art_rgba_texture_mask_callback (void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_texture_mask_run(linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_texture_tqmask_run(linebuf + (run_x1 - x0) * 4, tqmaskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
}
else
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- ksvg_art_rgba_texture_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
+ ksvg_art_rgba_texture_tqmask_run(linebuf, tqmaskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height);
}
data->dst += data->dst_rowstride;
@@ -1756,7 +1756,7 @@ void ksvg_art_rgb_texture(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1
ArtFilterLevel level,
ArtAlphaGamma *alphaGamma,
int alpha,
- const art_u8 *mask)
+ const art_u8 *tqmask)
{
ksvgArtRgbAffineClipAlphaData data;
int i;
@@ -1790,15 +1790,15 @@ void ksvg_art_rgb_texture(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1
data.src_height = src_height;
data.src_rowstride = src_rowstride;
- data.mask = mask;
+ data.tqmask = tqmask;
data.y0 = y0;
- if(mask)
+ if(tqmask)
{
if(dst_channels == 3)
- art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_texture_mask_callback, &data);
+ art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_texture_tqmask_callback, &data);
else
- art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_texture_mask_callback, &data);
+ art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_texture_tqmask_callback, &data);
}
else
{
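
[Editorial note, not part of the patch] The comment blocks in ksvg_art_rgb_texture_tqmask_run and ksvg_art_rgba_texture_tqmask_run above state the per-channel compositing rule, d = d + alpha * tqmask * (s - srcAlpha * d), for a premultiplied RGBA source. The following is a minimal standalone C sketch of that rule for one RGB destination pixel; the helper names (mul255, composite_rgb_pixel) are illustrative only and do not appear in art_misc.c.

typedef unsigned char art_u8;          /* matches libart's art_u8 */

/* Approximate (a * b) / 255 with the same "+ 0x80, add high byte, shift"
 * rounding used throughout art_misc.c. */
static int mul255(int a, int b)
{
        int t = a * b + 0x80;
        return (t + (t >> 8)) >> 8;
}

/* Composite one premultiplied RGBA source pixel over an RGB destination,
 * scaled by the fill/stroke/group opacity and the 8-bit coverage value. */
static void composite_rgb_pixel(art_u8 dst[3], const art_u8 src[4],
                                int alpha, int tqmask)
{
        int am = mul255(alpha, tqmask);        /* combined opacity */
        int srcAlpha = src[3];
        int i;

        for (i = 0; i < 3; i++)
        {
                int d = dst[i];
                int t = am * (src[i] - mul255(srcAlpha, d)) + 0x80;

                /* d = d + am * (s - srcAlpha * d / 255) / 255 */
                dst[i] = (art_u8)(d + ((t + (t >> 8)) >> 8));
        }
}
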
diff --git a/ksvg/impl/libs/art_support/art_misc.h b/ksvg/impl/libs/art_support/art_misc.h
index 52f09a63..7311d4b7 100644
--- a/ksvg/impl/libs/art_support/art_misc.h
+++ b/ksvg/impl/libs/art_support/art_misc.h
@@ -58,7 +58,7 @@ extern "C" {
const art_u8 *src,
int src_width, int src_height, int src_rowstride,
const double affine[6],
- int alpha, const art_u8 *mask);
+ int alpha, const art_u8 *tqmask);
void ksvg_art_rgb_texture(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride,
int dst_channels,
@@ -68,7 +68,7 @@ extern "C" {
ArtFilterLevel level,
ArtAlphaGamma *alphaGamma,
int alpha,
- const art_u8 *mask);
+ const art_u8 *tqmask);
void ksvg_art_svp_move(ArtSVP *svp, int dx, int dy);
diff --git a/ksvg/impl/libs/art_support/art_rgba_svp.c b/ksvg/impl/libs/art_support/art_rgba_svp.c
index 47c7d924..ea2c1476 100644
--- a/ksvg/impl/libs/art_support/art_rgba_svp.c
+++ b/ksvg/impl/libs/art_support/art_rgba_svp.c
@@ -37,7 +37,7 @@ struct _ArtKSVGRgbaSVPAlphaData {
art_u8 r, g, b, alpha;
art_u32 rgba;
art_u8 *buf;
- art_u8 *mask;
+ art_u8 *tqmask;
int rowstride;
int x0, x1;
int y0;
@@ -124,7 +124,7 @@ art_ksvg_rgba_run_alpha (art_u8 *buf, art_u8 r, art_u8 g, art_u8 b, int alpha, i
}
static void
-art_ksvg_rgba_mask_run_alpha (art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n)
+art_ksvg_rgba_tqmask_run_alpha (art_u8 *buf, art_u8 *tqmask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n)
{
int i;
int v;
@@ -136,7 +136,7 @@ art_ksvg_rgba_mask_run_alpha (art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art
for(i = 0; i < n; i++)
{
- am = (alpha * *mask++) + 0x80;
+ am = (alpha * *tqmask++) + 0x80;
am = (am + (am >> 8)) >> 8;
v = *buf;
@@ -330,7 +330,7 @@ art_ksvg_rgba_svp_alpha_opaque_callback(void *callback_data, int y,
}
static void
-art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
+art_ksvg_rgba_svp_alpha_tqmask_callback(void *callback_data, int y,
int start, ArtSVPRenderAAStep *steps, int n_steps)
{
ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data;
@@ -342,7 +342,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
art_u8 r, g, b;
int *alphatab;
int alpha;
- art_u8 *maskbuf;
+ art_u8 *tqmaskbuf;
linebuf = data->buf;
x0 = data->x0;
@@ -353,7 +353,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
b = data->b;
alphatab = data->alphatab;
- maskbuf = data->mask + (y - data->y0) * (data->x1 - data->x0);
+ tqmaskbuf = data->tqmask + (y - data->y0) * (data->x1 - data->x0);
if(n_steps > 0)
{
@@ -362,7 +362,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgba_mask_run_alpha (linebuf, maskbuf,
+ art_ksvg_rgba_tqmask_run_alpha (linebuf, tqmaskbuf,
r, g, b, alphatab[alpha],
run_x1 - x0);
}
@@ -376,7 +376,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgba_mask_run_alpha (linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0),
+ art_ksvg_rgba_tqmask_run_alpha (linebuf + (run_x0 - x0) * 4, tqmaskbuf + (run_x0 - x0),
r, g, b, alphatab[alpha],
run_x1 - run_x0);
}
@@ -386,7 +386,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgba_mask_run_alpha (linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0) ,
+ art_ksvg_rgba_tqmask_run_alpha (linebuf + (run_x1 - x0) * 4, tqmaskbuf + (run_x1 - x0) ,
r, g, b, alphatab[alpha],
x1 - run_x1);
}
@@ -395,7 +395,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgba_mask_run_alpha (linebuf, maskbuf,
+ art_ksvg_rgba_tqmask_run_alpha (linebuf, tqmaskbuf,
r, g, b, alphatab[alpha],
x1 - x0);
}
@@ -415,7 +415,7 @@ art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y,
* @rowstride: Rowstride of @buf buffer.
* @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing.
*
- * Renders the shape specified with @svp over the @buf RGB buffer.
+ * Renders the tqshape specified with @svp over the @buf RGB buffer.
* @x1 - @x0 specifies the width, and @y1 - @y0 specifies the height,
* of the rectangle rendered. The new pixels are stored starting at
* the first byte of @buf. Thus, the @x0 and @y0 parameters specify
@@ -436,7 +436,7 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp,
art_u32 rgba,
art_u8 *buf, int rowstride,
ArtAlphaGamma *alphagamma,
- art_u8 *mask)
+ art_u8 *tqmask)
{
ArtKSVGRgbaSVPAlphaData data;
int r, g, b;
@@ -454,7 +454,7 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp,
data.b = b;
data.alpha = alpha;
data.rgba = rgba;
- data.mask = mask;
+ data.tqmask = tqmask;
a = 0x8000;
da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */
@@ -471,8 +471,8 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp,
data.x1 = x1;
data.y0 = y0;
- if(mask)
- art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_mask_callback, &data);
+ if(tqmask)
+ art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_tqmask_callback, &data);
else
{
if (alpha == 255)
@@ -485,7 +485,7 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp,
/* RGB renderers */
static void
-art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n)
+art_ksvg_rgb_tqmask_run_alpha(art_u8 *buf, art_u8 *tqmask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n)
{
int i;
int v;
@@ -497,7 +497,7 @@ art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u
for(i = 0; i < n; i++)
{
- am = (alpha * *mask++) + 0x80;
+ am = (alpha * *tqmask++) + 0x80;
am = (am + (am >> 8)) >> 8;
v = *buf;
@@ -515,7 +515,7 @@ art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u
}
static void
-art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
+art_ksvg_rgb_svp_alpha_tqmask_callback(void *callback_data, int y,
int start, ArtSVPRenderAAStep *steps, int n_steps)
{
ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data;
@@ -527,7 +527,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
art_u8 r, g, b;
int *alphatab;
int alpha;
- art_u8 *maskbuf;
+ art_u8 *tqmaskbuf;
linebuf = data->buf;
x0 = data->x0;
@@ -538,7 +538,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
b = data->b;
alphatab = data->alphatab;
- maskbuf = data->mask + (y - data->y0) * (data->x1 - data->x0);
+ tqmaskbuf = data->tqmask + (y - data->y0) * (data->x1 - data->x0);
if(n_steps > 0)
{
@@ -547,7 +547,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgb_mask_run_alpha (linebuf, maskbuf,
+ art_ksvg_rgb_tqmask_run_alpha (linebuf, tqmaskbuf,
r, g, b, alphatab[alpha],
run_x1 - x0);
}
@@ -561,7 +561,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgb_mask_run_alpha (linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0),
+ art_ksvg_rgb_tqmask_run_alpha (linebuf + (run_x0 - x0) * 3, tqmaskbuf + (run_x0 - x0),
r, g, b, alphatab[alpha],
run_x1 - run_x0);
}
@@ -571,7 +571,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgb_mask_run_alpha (linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0) ,
+ art_ksvg_rgb_tqmask_run_alpha (linebuf + (run_x1 - x0) * 3, tqmaskbuf + (run_x1 - x0) ,
r, g, b, alphatab[alpha],
x1 - run_x1);
}
@@ -580,7 +580,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
{
alpha = (running_sum >> 16) & 0xff;
if(alpha)
- art_ksvg_rgb_mask_run_alpha (linebuf, maskbuf,
+ art_ksvg_rgb_tqmask_run_alpha (linebuf, tqmaskbuf,
r, g, b, alphatab[alpha],
x1 - x0);
}
@@ -600,7 +600,7 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
* @rowstride: Rowstride of @buf buffer.
* @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing.
*
- * Renders the shape specified with @svp over the @buf RGB buffer.
+ * Renders the tqshape specified with @svp over the @buf RGB buffer.
* @x1 - @x0 specifies the width, and @y1 - @y0 specifies the height,
* of the rectangle rendered. The new pixels are stored starting at
* the first byte of @buf. Thus, the @x0 and @y0 parameters specify
@@ -616,12 +616,12 @@ art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y,
* @alphagamma is NULL.
**/
void
-art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp,
+art_ksvg_rgb_svp_alpha_tqmask(const ArtSVP *svp,
int x0, int y0, int x1, int y1,
art_u32 rgba,
art_u8 *buf, int rowstride,
ArtAlphaGamma *alphagamma,
- art_u8 *mask)
+ art_u8 *tqmask)
{
ArtKSVGRgbaSVPAlphaData data;
int r, g, b, alpha;
@@ -637,7 +637,7 @@ art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp,
data.g = g;
data.b = b;
data.alpha = alpha;
- data.mask = mask;
+ data.tqmask = tqmask;
a = 0x8000;
da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */
@@ -654,6 +654,6 @@ art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp,
data.x1 = x1;
data.y0 = y0;
- art_svp_render_aa(svp, x0, y0, x1, y1, art_ksvg_rgb_svp_alpha_mask_callback, &data);
+ art_svp_render_aa(svp, x0, y0, x1, y1, art_ksvg_rgb_svp_alpha_tqmask_callback, &data);
}
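
[Editorial note, not part of the patch] In both art_misc.c and art_rgba_svp.c the tqmask buffer is indexed as data->tqmask + (y - data->y0) * (x1 - x0), so the callers are expected to pass a tightly packed 8-bit coverage buffer covering exactly the x0..x1 by y0..y1 rectangle, one byte per pixel with no independent rowstride. A minimal allocation sketch under that assumption; the helper name is hypothetical:

#include <stdlib.h>
#include <string.h>

typedef unsigned char art_u8;          /* matches libart's art_u8 */

/* Allocate a coverage buffer laid out the way the *_tqmask_* callbacks
 * expect: row pitch of exactly (x1 - x0) bytes, addressed as
 * tqmask[(y - y0) * (x1 - x0) + (x - x0)]. */
static art_u8 *alloc_coverage_tqmask(int x0, int y0, int x1, int y1)
{
        size_t n = (size_t)(x1 - x0) * (size_t)(y1 - y0);
        art_u8 *tqmask = malloc(n);

        if (tqmask)
                memset(tqmask, 0xff, n);       /* start fully opaque */
        return tqmask;
}
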
diff --git a/ksvg/impl/libs/art_support/art_rgba_svp.h b/ksvg/impl/libs/art_support/art_rgba_svp.h
index b59096d4..860eabcc 100644
--- a/ksvg/impl/libs/art_support/art_rgba_svp.h
+++ b/ksvg/impl/libs/art_support/art_rgba_svp.h
@@ -40,15 +40,15 @@ art_ksvg_rgba_svp_alpha(const ArtSVP *svp,
art_u32 rgba,
art_u8 *buf, int rowstride,
ArtAlphaGamma *alphagamma,
- art_u8 *mask);
+ art_u8 *tqmask);
void
-art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp,
+art_ksvg_rgb_svp_alpha_tqmask(const ArtSVP *svp,
int x0, int y0, int x1, int y1,
art_u32 rgba,
art_u8 *buf, int rowstride,
ArtAlphaGamma *alphagamma,
- art_u8 *mask);
+ art_u8 *tqmask);
#ifdef __cplusplus
}
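
[Editorial note, not part of the patch] The rounding idiom that recurs in every run function above -- t = a * b + 0x80; t = (t + (t >> 8)) >> 8 -- is libart's integer approximation of a * b / 255. For 8-bit operands it is exact round-to-nearest, which this throwaway check confirms by exhaustion:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int a, b;

        for (a = 0; a <= 255; a++)
                for (b = 0; b <= 255; b++)
                {
                        int t = a * b + 0x80;
                        int fast  = (t + (t >> 8)) >> 8;
                        int exact = (a * b + 127) / 255;   /* round(a*b/255) */

                        assert(fast == exact);
                }
        puts("(t + (t >> 8)) >> 8 matches round(a*b/255) for all 8-bit a, b");
        return 0;
}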