[cairo-commit] src/cairo-image-compositor.c
Chris Wilson
ickle at kemper.freedesktop.org
Thu Apr 19 03:04:29 PDT 2012
src/cairo-image-compositor.c | 154 ++++++++++++++++++++++++++-----------------
1 file changed, 96 insertions(+), 58 deletions(-)
New commits:
commit 9417fec04a172a7c44be38c1b3d032c3fee4f0d6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Wed Apr 18 20:44:43 2012 +0100
image: Split inline SRC composition
Currently we construct a mask for the entire line and try to process it
in one call to pixman (two without the LERP operator). An alternative
approach is to split the row into separate composite operations for the
clear (which we can skip), fully opaque, and partial spans.
As rows drawn with the source operator are typically mostly opaque or clear,
this is a good win, as we are able to utilise more fast paths. In the worst case,
it degrades to the old method of constructing a whole mask for a row.
It may reduce performance when we have to process lots of spans, though
(this is where the pixman spans interface should help). However, such
geometry is rare and typically handled elsewhere.
The existing code also has a bug whereby it clears the destination
for clear regions of the mask outside of the spans.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
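For illustration, the new per-row loop splits the coverage spans into three
cases: fully opaque runs go straight to a solid PIXMAN_OP_SRC copy, clear runs
are skipped, and only partially covered runs accumulate a mask for a later
lerp composite. The simplified sketch below (not the patch itself) shows the
shape of that loop; span_t and the emit_opaque/emit_partial callbacks are
hypothetical stand-ins for the pixman composite calls in the real code:

/* Simplified sketch of the row-splitting approach described above.
 * span_t and the emit_* callbacks are hypothetical placeholders. */
#include <stdint.h>
#include <string.h>

typedef struct { int x; uint8_t coverage; } span_t;

static void
split_src_row (const span_t *spans, unsigned num_spans,
               uint8_t *mask_buf,
               void (*emit_opaque) (int x, int len),
               void (*emit_partial) (int x, int len, const uint8_t *mask))
{
    uint8_t *m = mask_buf;
    int x0;

    if (num_spans == 0)
        return;

    x0 = spans[0].x;    /* start of the pending partial run */
    while (num_spans-- > 1) {
        int len = spans[1].x - spans[0].x;

        if (spans[0].coverage == 0xff || spans[0].coverage == 0x00) {
            /* flush any accumulated partial run first */
            if (spans[0].x != x0)
                emit_partial (x0, spans[0].x - x0, mask_buf);

            if (spans[0].coverage == 0xff)
                emit_opaque (spans[0].x, len);  /* solid SRC fast path */
            /* a clear run needs no work: with a lerp, zero coverage
             * leaves the destination untouched */

            m = mask_buf;
            x0 = spans[1].x;
        } else {
            /* partial coverage: extend the mask for a later composite */
            memset (m, spans[0].coverage, len);
            m += len;
        }
        spans++;
    }

    if (spans[0].x != x0)
        emit_partial (x0, spans[0].x - x0, mask_buf);
}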
diff --git a/src/cairo-image-compositor.c b/src/cairo-image-compositor.c
index 6b95840..9c37585 100644
--- a/src/cairo-image-compositor.c
+++ b/src/cairo-image-compositor.c
@@ -2271,77 +2271,116 @@ _inplace_src_spans (void *abstract_renderer,
unsigned num_spans)
{
cairo_image_span_renderer_t *r = abstract_renderer;
- uint8_t *mask;
- int x0, y0;
+ uint8_t *m;
+ int x0;
if (num_spans == 0)
return CAIRO_STATUS_SUCCESS;
- x0 = r->composite->unbounded.x;
- y0 = r->u.composite.mask_y;
- r->u.composite.mask_y = y + h;
- if (y != y0) {
- pixman_image_composite32 (PIXMAN_OP_CLEAR, r->src, NULL, r->u.composite.dst,
- 0, 0,
- 0, 0,
- x0, y0,
- r->composite->unbounded.width, y - y0);
- }
-
- mask = (uint8_t *)pixman_image_get_data (r->mask);
- if (spans[0].x != x0) {
- memset(mask, 0, spans[0].x - x0);
- mask += spans[0].x - x0;
- }
+ x0 = spans[0].x;
+ m = r->buf;
do {
int len = spans[1].x - spans[0].x;
- *mask++ = spans[0].coverage;
- if (len > 1) {
- memset (mask, spans[0].coverage, --len);
- mask += len;
- }
- spans++;
- } while (--num_spans > 1);
- if (spans[0].x-x0 != r->composite->unbounded.width)
- memset(mask, 0, r->composite->unbounded.width+x0 - spans[0].x);
-
+ if (spans[0].coverage == 0xff) {
+ if (spans[0].x != x0) {
#if PIXMAN_HAS_OP_LERP
- pixman_image_composite32 (PIXMAN_OP_LERP_SRC, r->src, r->mask, r->u.composite.dst,
- x0 + r->u.composite.src_x,
- y + r->u.composite.src_y,
- 0, 0,
- x0, y,
- r->composite->unbounded.width, h);
+ pixman_image_composite32 (PIXMAN_OP_LERP_SRC,
+ r->src, r->mask, r->u.composite.dst,
+ x0 + r->u.composite.src_x,
+ y + r->u.composite.src_y,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
#else
- pixman_image_composite32 (PIXMAN_OP_OUT_REVERSE, r->mask, NULL, r->u.composite.dst,
- 0, 0,
- 0, 0,
- x0, y,
- r->composite->unbounded.width, h);
- pixman_image_composite32 (PIXMAN_OP_ADD, r->src, r->mask, r->u.composite.dst,
- x0 + r->u.composite.src_x,
- y + r->u.composite.src_y,
- 0, 0,
- x0, y,
- r->composite->unbounded.width, h);
+ pixman_image_composite32 (PIXMAN_OP_OUT_REVERSE,
+ r->mask, NULL, r->u.composite.dst,
+ 0, 0,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
+ pixman_image_composite32 (PIXMAN_OP_ADD,
+ r->src, r->mask, r->u.composite.dst,
+ x0 + r->u.composite.src_x,
+ y + r->u.composite.src_y,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
#endif
+ }
- return CAIRO_STATUS_SUCCESS;
-}
+ pixman_image_composite32 (PIXMAN_OP_SRC,
+ r->src, NULL, r->u.composite.dst,
+ spans[0].x + r->u.composite.src_x,
+ y + r->u.composite.src_y,
+ 0, 0,
+ spans[0].x, y,
+ spans[1].x - spans[0].x, h);
-static cairo_status_t
-_inplace_src_finish (void *abstract_renderer)
-{
- cairo_image_span_renderer_t *r = abstract_renderer;
- int y0 = r->u.composite.mask_y;
- int y1 = r->composite->unbounded.y + r->composite->unbounded.height;
+ m = r->buf;
+ x0 = spans[1].x;
+ } else if (spans[0].coverage == 0x0) {
+ if (spans[0].x != x0) {
+#if PIXMAN_HAS_OP_LERP
+ pixman_image_composite32 (PIXMAN_OP_LERP_SRC,
+ r->src, r->mask, r->u.composite.dst,
+ x0 + r->u.composite.src_x,
+ y + r->u.composite.src_y,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
+#else
+ pixman_image_composite32 (PIXMAN_OP_OUT_REVERSE,
+ r->mask, NULL, r->u.composite.dst,
+ 0, 0,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
+ pixman_image_composite32 (PIXMAN_OP_ADD,
+ r->src, r->mask, r->u.composite.dst,
+ x0 + r->u.composite.src_x,
+ y + r->u.composite.src_y,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
+#endif
+ }
+
+ m = r->buf;
+ x0 = spans[1].x;
+ } else {
+ *m++ = spans[0].coverage;
+ if (len > 1) {
+ memset (m, spans[0].coverage, --len);
+ m += len;
+ }
+ }
+ spans++;
+ } while (--num_spans > 1);
- if (y0 != y1) {
- pixman_image_composite32 (PIXMAN_OP_CLEAR, r->src, NULL, r->u.composite.dst,
+ if (spans[0].x != x0) {
+#if PIXMAN_HAS_OP_LERP
+ pixman_image_composite32 (PIXMAN_OP_LERP_SRC,
+ r->src, r->mask, r->u.composite.dst,
+ x0 + r->u.composite.src_x,
+ y + r->u.composite.src_y,
+ 0, 0,
+ x0, y,
+ spans[0].x - x0, h);
+#else
+ pixman_image_composite32 (PIXMAN_OP_OUT_REVERSE,
+ r->mask, NULL, r->u.composite.dst,
+ 0, 0,
0, 0,
+ x0, y,
+ spans[0].x - x0, h);
+ pixman_image_composite32 (PIXMAN_OP_ADD,
+ r->src, r->mask, r->u.composite.dst,
+ x0 + r->u.composite.src_x,
+ y + r->u.composite.src_y,
0, 0,
- r->composite->unbounded.x, y0,
- r->composite->unbounded.width, y1 - y0);
+ x0, y,
+ spans[0].x - x0, h);
+#endif
}
return CAIRO_STATUS_SUCCESS;
@@ -2454,7 +2493,6 @@ inplace_renderer_init (cairo_image_span_renderer_t *r,
r->op = PIXMAN_OP_SRC;
} else if (composite->op == CAIRO_OPERATOR_SOURCE) {
r->base.render_rows = _inplace_src_spans;
- r->base.finish = _inplace_src_finish;
r->u.composite.mask_y = r->composite->unbounded.y;
width = (composite->unbounded.width + 3) & ~3;
} else if (composite->op == CAIRO_OPERATOR_CLEAR) {
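As an aside, the PIXMAN_HAS_OP_LERP branches in the hunk above fall back to a
two-pass emulation when pixman lacks the lerp operator: dst = src*mask +
dst*(1 - mask) is built from an OUT_REVERSE pass (dst *= 1 - mask) followed by
an ADD pass (dst += src*mask) over the same region. A self-contained sketch of
that pattern follows; composite_partial_run is a hypothetical wrapper, not a
function in the patch, and PIXMAN_OP_LERP_SRC is only available with pixman
builds that provide the lerp operators:

/* Sketch of the lerp fallback used in the #else branches above.
 * PIXMAN_HAS_OP_LERP is a cairo build-time macro; the wrapper name and
 * parameters here are placeholders, not part of the patch. */
#include <pixman.h>

static void
composite_partial_run (pixman_image_t *src, pixman_image_t *mask,
                       pixman_image_t *dst,
                       int src_x, int src_y,
                       int dst_x, int dst_y,
                       int width, int height)
{
#if PIXMAN_HAS_OP_LERP
    /* single pass: dst = src*mask + dst*(1 - mask) */
    pixman_image_composite32 (PIXMAN_OP_LERP_SRC, src, mask, dst,
                              src_x, src_y, 0, 0,
                              dst_x, dst_y, width, height);
#else
    /* first knock out dst where the mask has coverage ... */
    pixman_image_composite32 (PIXMAN_OP_OUT_REVERSE, mask, NULL, dst,
                              0, 0, 0, 0,
                              dst_x, dst_y, width, height);
    /* ... then add in src weighted by the mask */
    pixman_image_composite32 (PIXMAN_OP_ADD, src, mask, dst,
                              src_x, src_y, 0, 0,
                              dst_x, dst_y, width, height);
#endif
}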