[cairo-commit] 7 commits - perf/micro src/cairo-bentley-ottmann-rectangular.c src/cairo-clip-private.h src/cairo-image-surface.c src/cairo-tor-scan-converter.c test/extended-blend-alpha-mask.argb32.ref.png test/extended-blend-alpha-mask.rgb24.ref.png test/extended-blend.c test/extended-blend-mask.argb32.ref.png test/extended-blend-mask.rgb24.ref.png test/Makefile.refs

Chris Wilson ickle at kemper.freedesktop.org
Mon Aug 1 04:12:30 PDT 2011


 perf/micro/dragon.c                           |   34 ++++++++++++++++
 src/cairo-bentley-ottmann-rectangular.c       |   54 ++++++++++++++++++++++++--
 src/cairo-clip-private.h                      |    2 
 src/cairo-image-surface.c                     |   24 +++++------
 src/cairo-tor-scan-converter.c                |   44 ++++++++++++++-------
 test/Makefile.refs                            |    4 +
 test/extended-blend-alpha-mask.argb32.ref.png |binary
 test/extended-blend-alpha-mask.rgb24.ref.png  |binary
 test/extended-blend-mask.argb32.ref.png       |binary
 test/extended-blend-mask.rgb24.ref.png        |binary
 test/extended-blend.c                         |   45 +++++++++++++++++++++
 11 files changed, 176 insertions(+), 31 deletions(-)

New commits:
commit c4f4c5726194c9cd800e5d6d9a09c7d01a4dadd7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 1 00:12:24 2011 +0100

    bo-rectangular: perform an incremental sort
    
    Bucketing the rectangles together on their top-scanline and then sorting
    within that scanline is significantly faster for dragon despite the extra
    passes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/cairo-bentley-ottmann-rectangular.c b/src/cairo-bentley-ottmann-rectangular.c
index ceeddd3..f55daac 100644
--- a/src/cairo-bentley-ottmann-rectangular.c
+++ b/src/cairo-bentley-ottmann-rectangular.c
@@ -261,7 +261,6 @@ sweep_line_init (sweep_line_t	 *sweep_line,
 		 int		  num_rectangles,
 		 cairo_fill_rule_t fill_rule)
 {
-    _rectangle_sort (rectangles, num_rectangles);
     rectangles[num_rectangles] = NULL;
     sweep_line->rectangles = rectangles;
 
@@ -702,6 +701,8 @@ _cairo_bentley_ottmann_tessellate_rectangular_traps (cairo_traps_t *traps,
 
 	rectangles_ptrs[i] = &rectangles[i];
     }
+    /* XXX incremental sort */
+    _rectangle_sort (rectangles_ptrs, i);
 
     _cairo_traps_clear (traps);
     status = _cairo_bentley_ottmann_tessellate_rectangular (rectangles_ptrs, i,
@@ -727,9 +728,11 @@ _cairo_bentley_ottmann_tessellate_boxes (const cairo_boxes_t *in,
     rectangle_t *rectangles;
     rectangle_t *stack_rectangles_ptrs[ARRAY_LENGTH (stack_rectangles) + 1];
     rectangle_t **rectangles_ptrs;
+    rectangle_t *stack_rectangles_chain[CAIRO_STACK_ARRAY_LENGTH (rectangle_t *) ];
+    rectangle_t **rectangles_chain;
     const struct _cairo_boxes_chunk *chunk;
     cairo_status_t status;
-    int i, j;
+    int i, j, y_min, y_max;
 
     if (unlikely (in->num_boxes == 0)) {
 	_cairo_boxes_clear (out);
@@ -751,6 +754,28 @@ _cairo_bentley_ottmann_tessellate_boxes (const cairo_boxes_t *in,
 	return CAIRO_STATUS_SUCCESS;
     }
 
+    y_min = INT_MAX; y_max = INT_MIN;
+    for (chunk = &in->chunks; chunk != NULL; chunk = chunk->next) {
+	const cairo_box_t *box = chunk->base;
+	for (i = 0; i < chunk->count; i++) {
+	    if (box[i].p1.y < y_min)
+		y_min = box[i].p1.y;
+	    if (box[i].p1.y > y_max)
+		y_max = box[i].p1.y;
+	}
+    }
+    y_min = _cairo_fixed_integer_floor (y_min);
+    y_max = _cairo_fixed_integer_floor (y_max) + 1;
+    y_max -= y_min;
+
+    rectangles_chain = stack_rectangles_chain;
+    if (y_max > ARRAY_LENGTH (stack_rectangles_chain)) {
+	rectangles_chain = _cairo_malloc_ab (y_max, sizeof (rectangle_t *));
+	if (unlikely (rectangles_chain == NULL))
+	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+    }
+    memset (rectangles_chain, 0, y_max * sizeof (rectangle_t*));
+
     rectangles = stack_rectangles;
     rectangles_ptrs = stack_rectangles_ptrs;
     if (in->num_boxes > ARRAY_LENGTH (stack_rectangles)) {
@@ -758,8 +783,11 @@ _cairo_bentley_ottmann_tessellate_boxes (const cairo_boxes_t *in,
 					  sizeof (rectangle_t) +
 					  sizeof (rectangle_t *),
 					  sizeof (rectangle_t *));
-	if (unlikely (rectangles == NULL))
+	if (unlikely (rectangles == NULL)) {
+	    if (rectangles_chain != stack_rectangles_chain)
+		free (rectangles_chain);
 	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+	}
 
 	rectangles_ptrs = (rectangle_t **) (rectangles + in->num_boxes);
     }
@@ -768,6 +796,8 @@ _cairo_bentley_ottmann_tessellate_boxes (const cairo_boxes_t *in,
     for (chunk = &in->chunks; chunk != NULL; chunk = chunk->next) {
 	const cairo_box_t *box = chunk->base;
 	for (i = 0; i < chunk->count; i++) {
+	    int h;
+
 	    if (box[i].p1.x < box[i].p2.x) {
 		rectangles[j].left.x = box[i].p1.x;
 		rectangles[j].left.dir = 1;
@@ -788,11 +818,27 @@ _cairo_bentley_ottmann_tessellate_boxes (const cairo_boxes_t *in,
 	    rectangles[j].top = box[i].p1.y;
 	    rectangles[j].bottom = box[i].p2.y;
 
-	    rectangles_ptrs[j] = &rectangles[j];
+	    h = _cairo_fixed_integer_floor (box[i].p1.y) - y_min;
+	    rectangles[j].left.next = (edge_t *)rectangles_chain[h];
+	    rectangles_chain[h] = &rectangles[j];
 	    j++;
 	}
     }
 
+    j = 0;
+    for (y_min = 0; y_min < y_max; y_min++) {
+	rectangle_t *r;
+	int start = j;
+	for (r = rectangles_chain[y_min]; r; r = (rectangle_t *)r->left.next)
+	    rectangles_ptrs[j++] = r;
+	if (j > start + 1)
+		_rectangle_sort (rectangles_ptrs + start, j - start);
+    }
+    assert (j == in->num_boxes);
+
+    if (rectangles_chain != stack_rectangles_chain)
+	free (rectangles_chain);
+
     _cairo_boxes_clear (out);
     status = _cairo_bentley_ottmann_tessellate_rectangular (rectangles_ptrs, j,
 							    fill_rule,
commit fec80f11990adbb4c1220d444186ed600082956d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 31 22:29:48 2011 +0100

    perf: Add a few more variations to dragon to exercise unaligned lines/spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/perf/micro/dragon.c b/perf/micro/dragon.c
index 29057e9..4b16f38 100644
--- a/perf/micro/dragon.c
+++ b/perf/micro/dragon.c
@@ -203,6 +203,13 @@ do_dragon_solid (cairo_t *cr, int width, int height, int loops)
 }
 
 static cairo_perf_ticks_t
+do_dragon_solid_unaligned (cairo_t *cr, int width, int height, int loops)
+{
+    cairo_translate (cr, 0.01, 0.01);
+    return do_dragon_solid (cr, width, height, loops);
+}
+
+static cairo_perf_ticks_t
 do_dragon_solid_aligned_clip (cairo_t *cr, int width, int height, int loops)
 {
     cairo_reset_clip (cr);
@@ -214,6 +221,18 @@ do_dragon_solid_aligned_clip (cairo_t *cr, int width, int height, int loops)
 }
 
 static cairo_perf_ticks_t
+do_dragon_unaligned_solid_aligned_clip (cairo_t *cr, int width, int height, int loops)
+{
+    cairo_translate (cr, 0.01, 0.01);
+    cairo_reset_clip (cr);
+    cairo_rectangle (cr, 10, 10, width/2 + 10, height/2 + 10);
+    cairo_rectangle (cr, width/2-20, height/2-20, width/2 + 10, height/2 + 10);
+    cairo_clip (cr);
+
+    return do_dragon_solid (cr, width, height, loops);
+}
+
+static cairo_perf_ticks_t
 do_dragon_solid_unaligned_clip (cairo_t *cr, int width, int height, int loops)
 {
     cairo_reset_clip (cr);
@@ -225,6 +244,18 @@ do_dragon_solid_unaligned_clip (cairo_t *cr, int width, int height, int loops)
 }
 
 static cairo_perf_ticks_t
+do_dragon_unaligned_solid_unaligned_clip (cairo_t *cr, int width, int height, int loops)
+{
+    cairo_translate (cr, 0.01, 0.01);
+    cairo_reset_clip (cr);
+    cairo_rectangle (cr, 10.5, 10.5, width/2 + 10, height/2 + 10);
+    cairo_rectangle (cr, width/2-20, height/2-20, width/2 + 9.5, height/2 + 9.5);
+    cairo_clip (cr);
+
+    return do_dragon_solid (cr, width, height, loops);
+}
+
+static cairo_perf_ticks_t
 do_dragon_solid_circle_clip (cairo_t *cr, int width, int height, int loops)
 {
     cairo_reset_clip (cr);
@@ -241,8 +272,11 @@ dragon (cairo_perf_t *perf, cairo_t *cr, int width, int height)
 	return;
 
     cairo_perf_run (perf, "dragon-solid", do_dragon_solid, NULL);
+    cairo_perf_run (perf, "dragon-unaligned-solid", do_dragon_solid_unaligned, NULL);
     cairo_perf_run (perf, "dragon-solid-aligned-clip", do_dragon_solid_aligned_clip, NULL);
+    cairo_perf_run (perf, "dragon-unaligned-solid-aligned-clip", do_dragon_unaligned_solid_aligned_clip, NULL);
     cairo_perf_run (perf, "dragon-solid-unaligned-clip", do_dragon_solid_unaligned_clip, NULL);
+    cairo_perf_run (perf, "dragon-unaligned-solid-unaligned-clip", do_dragon_unaligned_solid_unaligned_clip, NULL);
     cairo_perf_run (perf, "dragon-solid-circle-clip", do_dragon_solid_circle_clip, NULL);
     cairo_perf_run (perf, "dragon", do_dragon, NULL);
 }
commit 4a8f638c9a7772e9e8eaa7246b892eedb1d784d9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 31 21:48:47 2011 +0100

    tor: Suppress repeated spans
    
    Under certain circumstances we will emit identical spans when an edge
    covers the entire pixel and then again for the subsequent pixels. These
    can be squashed into a single span.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/cairo-tor-scan-converter.c b/src/cairo-tor-scan-converter.c
index 32a6293..954c514 100644
--- a/src/cairo-tor-scan-converter.c
+++ b/src/cairo-tor-scan-converter.c
@@ -1816,7 +1816,7 @@ blit_with_span_renderer (struct cell_list *cells,
 {
     struct cell *cell = cells->head.next;
     int prev_x = xmin, last_x = -1;
-    int cover = 0, last_cover = -1;
+    int cover = 0, last_cover = 0;
     cairo_half_open_span_t *spans;
     unsigned num_spans;
 
@@ -1851,7 +1851,7 @@ blit_with_span_renderer (struct cell_list *cells,
 	int x = cell->x;
 	int area;
 
-	if (x > prev_x) {
+	if (x > prev_x && cover != last_cover) {
 	    spans[num_spans].x = prev_x;
 	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
 	    last_cover = cover;
commit c0407e84e919bca08f8cd2e1e350e54f36122968
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 31 18:10:40 2011 +0100

    test: Extend PDF blend tests to include an opacity mask
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/Makefile.refs b/test/Makefile.refs
index 6d2a0c8..1d383ef 100644
--- a/test/Makefile.refs
+++ b/test/Makefile.refs
@@ -394,6 +394,8 @@ REFERENCE_IMAGES = \
 	extend-repeat.ps2.ref.png \
 	extend-repeat.ps3.ref.png \
 	extend-repeat.ref.png \
+	extended-blend-alpha-mask.argb32.ref.png \
+	extended-blend-alpha-mask.rgb24.ref.png \
 	extended-blend-alpha.argb32.ref.png \
 	extended-blend-alpha.image16.ref.png \
 	extended-blend-alpha.quartz.argb32.ref.png \
@@ -401,6 +403,8 @@ REFERENCE_IMAGES = \
 	extended-blend-alpha.rgb24.ref.png \
 	extended-blend-alpha.svg12.argb32.xfail.png \
 	extended-blend-alpha.svg12.rgb24.xfail.png \
+	extended-blend-mask.argb32.ref.png \
+	extended-blend-mask.rgb24.ref.png \
 	extended-blend-solid-alpha.argb32.ref.png \
 	extended-blend-solid-alpha.image16.ref.png \
 	extended-blend-solid-alpha.rgb24.ref.png \
diff --git a/test/extended-blend-alpha-mask.argb32.ref.png b/test/extended-blend-alpha-mask.argb32.ref.png
new file mode 100644
index 0000000..a1dd2a9
Binary files /dev/null and b/test/extended-blend-alpha-mask.argb32.ref.png differ
diff --git a/test/extended-blend-alpha-mask.rgb24.ref.png b/test/extended-blend-alpha-mask.rgb24.ref.png
new file mode 100644
index 0000000..b0a6c6c
Binary files /dev/null and b/test/extended-blend-alpha-mask.rgb24.ref.png differ
diff --git a/test/extended-blend-mask.argb32.ref.png b/test/extended-blend-mask.argb32.ref.png
new file mode 100644
index 0000000..5fa78e4
Binary files /dev/null and b/test/extended-blend-mask.argb32.ref.png differ
diff --git a/test/extended-blend-mask.rgb24.ref.png b/test/extended-blend-mask.rgb24.ref.png
new file mode 100644
index 0000000..1c85474
Binary files /dev/null and b/test/extended-blend-mask.rgb24.ref.png differ
diff --git a/test/extended-blend.c b/test/extended-blend.c
index 45e0e48..a45ddc9 100644
--- a/test/extended-blend.c
+++ b/test/extended-blend.c
@@ -139,6 +139,26 @@ do_blend (cairo_t *cr, cairo_operator_t op, cairo_bool_t alpha)
     cairo_surface_destroy (bg);
 }
 
+static void
+do_blend_mask (cairo_t *cr, cairo_operator_t op, cairo_bool_t alpha)
+{
+    cairo_surface_t *bg, *fg;
+
+    create_patterns (cr, &bg, &fg, alpha);
+
+    /* not using CAIRO_OPERATOR_SOURCE here, it triggers a librsvg bug */
+    cairo_set_operator (cr, CAIRO_OPERATOR_OVER);
+    cairo_set_source_surface (cr, bg, 0, 0);
+    cairo_paint (cr);
+
+    cairo_set_operator (cr, op);
+    cairo_set_source_surface (cr, fg, 0, 0);
+    cairo_paint_with_alpha (cr, .5);
+
+    cairo_surface_destroy (fg);
+    cairo_surface_destroy (bg);
+}
+
 static cairo_test_status_t
 draw (cairo_t *cr, cairo_bool_t alpha,
       void (*blend)(cairo_t *, cairo_operator_t, cairo_bool_t))
@@ -182,6 +202,17 @@ draw_extended_blend_solid_alpha (cairo_t *cr, int width, int height)
     return draw (cr, TRUE, do_blend_solid);
 }
 
+static cairo_test_status_t
+draw_extended_blend_mask (cairo_t *cr, int width, int height)
+{
+    return draw (cr, FALSE, do_blend_mask);
+}
+static cairo_test_status_t
+draw_extended_blend_alpha_mask (cairo_t *cr, int width, int height)
+{
+    return draw (cr, TRUE, do_blend_mask);
+}
+
 CAIRO_TEST (extended_blend,
 	    "Tests extended blend modes without alpha",
 	    "operator", /* keywords */
@@ -196,6 +227,20 @@ CAIRO_TEST (extended_blend_alpha,
 	    FULL_WIDTH * SIZE, FULL_HEIGHT * SIZE,
 	    NULL, draw_extended_blend_alpha)
 
+CAIRO_TEST (extended_blend_mask,
+	    "Tests extended blend modes with an alpha mask",
+	    "operator,mask", /* keywords */
+	    NULL, /* requirements */
+	    FULL_WIDTH * SIZE, FULL_HEIGHT * SIZE,
+	    NULL, draw_extended_blend_mask)
+CAIRO_TEST (extended_blend_alpha_mask,
+	    "Tests extended blend modes with an alpha mask",
+	    "operator,mask", /* keywords */
+	    NULL, /* requirements */
+	    FULL_WIDTH * SIZE, FULL_HEIGHT * SIZE,
+	    NULL, draw_extended_blend_alpha_mask)
+
+
 CAIRO_TEST (extended_blend_solid,
 	    "Tests extended blend modes on solid patterns without alpha",
 	    "operator", /* keywords */
commit 3520aecfe5567d4f99e299aefec3346879f982aa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 31 11:24:20 2011 +0100

    image: the boxes are already pixel-aligned so skip the extra rounding step

diff --git a/src/cairo-image-surface.c b/src/cairo-image-surface.c
index fd2ec82..293191d 100644
--- a/src/cairo-image-surface.c
+++ b/src/cairo-image-surface.c
@@ -3034,10 +3034,10 @@ _composite_boxes (cairo_image_surface_t *dst,
 		cairo_box_t *box = chunk->base;
 
 		for (i = 0; i < chunk->count; i++) {
-		    int x1 = _cairo_fixed_integer_round_down (box[i].p1.x);
-		    int y1 = _cairo_fixed_integer_round_down (box[i].p1.y);
-		    int x2 = _cairo_fixed_integer_round_down (box[i].p2.x);
-		    int y2 = _cairo_fixed_integer_round_down (box[i].p2.y);
+		    int x1 = _cairo_fixed_integer_part (box[i].p1.x);
+		    int y1 = _cairo_fixed_integer_part (box[i].p1.y);
+		    int x2 = _cairo_fixed_integer_part (box[i].p2.x);
+		    int y2 = _cairo_fixed_integer_part (box[i].p2.y);
 
 		    if (x2 == x1 || y2 == y1)
 			continue;
@@ -3070,10 +3070,10 @@ _composite_boxes (cairo_image_surface_t *dst,
 	    cairo_box_t *box = chunk->base;
 
 	    for (i = 0; i < chunk->count; i++) {
-		int x1 = _cairo_fixed_integer_round_down (box[i].p1.x);
-		int y1 = _cairo_fixed_integer_round_down (box[i].p1.y);
-		int x2 = _cairo_fixed_integer_round_down (box[i].p2.x);
-		int y2 = _cairo_fixed_integer_round_down (box[i].p2.y);
+		int x1 = _cairo_fixed_integer_part (box[i].p1.x);
+		int y1 = _cairo_fixed_integer_part (box[i].p1.y);
+		int x2 = _cairo_fixed_integer_part (box[i].p2.x);
+		int y2 = _cairo_fixed_integer_part (box[i].p2.y);
 
 		if (x2 == x1 || y2 == y1)
 		    continue;
@@ -3131,10 +3131,10 @@ _composite_boxes (cairo_image_surface_t *dst,
 	    const cairo_box_t *box = chunk->base;
 
 	    for (i = 0; i < chunk->count; i++) {
-		int x1 = _cairo_fixed_integer_round_down (box[i].p1.x);
-		int y1 = _cairo_fixed_integer_round_down (box[i].p1.y);
-		int x2 = _cairo_fixed_integer_round_down (box[i].p2.x);
-		int y2 = _cairo_fixed_integer_round_down (box[i].p2.y);
+		int x1 = _cairo_fixed_integer_part (box[i].p1.x);
+		int y1 = _cairo_fixed_integer_part (box[i].p1.y);
+		int x2 = _cairo_fixed_integer_part (box[i].p2.x);
+		int y2 = _cairo_fixed_integer_part (box[i].p2.y);
 
 		if (x2 == x1 || y2 == y1)
 		    continue;
commit d5537547069babb338e7e27b97d219aace4d348a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 31 12:28:33 2011 +0100

    tor: Sort the initial edge correctly
    
    Don't assume that the initial edge on the active list has the lowest
    x position.

diff --git a/src/cairo-tor-scan-converter.c b/src/cairo-tor-scan-converter.c
index 817694e..32a6293 100644
--- a/src/cairo-tor-scan-converter.c
+++ b/src/cairo-tor-scan-converter.c
@@ -1142,12 +1142,22 @@ static struct edge *
 merge_sorted_edges (struct edge *head_a, struct edge *head_b)
 {
     struct edge *head, **next;
+    int32_t x;
+
+    if (head_a == NULL)
+	return head_b;
 
-    head = head_a;
     next = &head;
+    if (head_a->x.quo <= head_b->x.quo) {
+	head = head_a;
+    } else {
+	head = head_b;
+	goto start_with_b;
+    }
 
-    while (1) {
-	while (head_a != NULL && head_a->x.quo <= head_b->x.quo) {
+    do {
+	x = head_b->x.quo;
+	while (head_a != NULL && head_a->x.quo <= x) {
 	    next = &head_a->next;
 	    head_a = head_a->next;
 	}
@@ -1156,7 +1166,9 @@ merge_sorted_edges (struct edge *head_a, struct edge *head_b)
 	if (head_a == NULL)
 	    return head;
 
-	while (head_b != NULL && head_b->x.quo <= head_a->x.quo) {
+start_with_b:
+	x = head_a->x.quo;
+	while (head_b != NULL && head_b->x.quo <= x) {
 	    next = &head_b->next;
 	    head_b = head_b->next;
 	}
@@ -1164,7 +1176,7 @@ merge_sorted_edges (struct edge *head_a, struct edge *head_b)
 	*next = head_a;
 	if (head_b == NULL)
 	    return head;
-    }
+    } while (1);
 }
 
 /*
@@ -1287,21 +1299,25 @@ active_list_merge_edges_from_polygon(struct active_list *active,
      * the active list. */
     int min_height = active->min_height;
     struct edge *subrow_edges = NULL;
+    struct edge *tail = *ptail;
 
-    while (1) {
-	struct edge *tail = *ptail;
-	if (NULL == tail) break;
+    do {
+	struct edge *next = tail->next;
 
 	if (y == tail->ytop) {
-	    *ptail = tail->next;
 	    tail->next = subrow_edges;
 	    subrow_edges = tail;
+
 	    if (tail->height_left < min_height)
 		min_height = tail->height_left;
-	} else {
+
+	    *ptail = next;
+	} else
 	    ptail = &tail->next;
-	}
-    }
+
+	tail = next;
+    } while (tail);
+
     if (subrow_edges) {
 	sort_edges (subrow_edges, UINT_MAX, &subrow_edges);
 	active->head = merge_sorted_edges (active->head, subrow_edges);
commit bca402a8e362f1f93a0866de5dabdca132c04684
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 1 11:45:11 2011 +0100

    clip: Mark __cairo_clip_all as private and do not export
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/cairo-clip-private.h b/src/cairo-clip-private.h
index b4cb018..5302471 100644
--- a/src/cairo-clip-private.h
+++ b/src/cairo-clip-private.h
@@ -79,7 +79,7 @@ _cairo_clip_path_destroy (cairo_clip_path_t *clip_path);
 cairo_private void
 _cairo_clip_destroy (cairo_clip_t *clip);
 
-extern const cairo_clip_t __cairo_clip_all;
+cairo_private extern const cairo_clip_t __cairo_clip_all;
 
 static inline cairo_bool_t _cairo_clip_is_all_clipped(const cairo_clip_t *clip)
 {


More information about the cairo-commit mailing list