[cairo] help building cairo on windows
Dimiter 'malkia' Stanev
malkia at gmail.com
Mon Sep 10 11:17:47 PDT 2012
On 9/10/2012 7:04 AM, Cosmin Apreutesei wrote:
> Hi,
>
> I have to build cairo on Windows because I want to use the recording
> surface, and I can't find binaries newer than 1.10 anywhere on the net
> (until now I have used the binaries from GTK, but those are old and the
> recording surface doesn't work with them).
>
> Trying to build pixman with:
>
>> make CFG=debug -f Makefile.win32
>
> gives me:
>
> pixman-mmx.c(545) : error C2143: syntax error : missing ';' before 'type'
>
> and then 100 syntax errors like that (the other C files compile fine
> until the MMX one).
>
> The sources were pulled today from git. The environment is Visual
> Studio Express 2008. make is 3.81 from MinGW (I also had to throw in
> pkg-config binaries + dependencies from GTK).
>
> I also tried to build the latest pixman (0.26.2) with the same command
> but it failed with:
>
> Entering directory `/x/work/pixman-0.26.2/test'
> Makefile.win32: No such file or directory
>
> I also tried building with mozilla-build from the MinGW bash prompt,
> with the same results.
>
>
> So what's the best way (preferably from the command line) to build
> pixman and cairo under Windows? And which version is best: the release
> or the git one?
>
> Btw, if anyone has an up-to-date cairo.dll, that would work for me too.
>
Hi Cosmin,
With this patch you should be able to compile.
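
In case it helps to know why it breaks: error C2143 here is MSVC 2008
compiling the file as C89, where every declaration must come before the
first statement of its block. Recent pixman-mmx.c declares __m64
temporaries in the middle of blocks, which gcc in C99 mode accepts but
cl.exe does not, so the patch just hoists those declarations. A minimal
sketch of the pattern (hypothetical file, not from pixman):

    /* decl_after_stmt.c : the pattern MSVC 2008 rejects in C mode */
    #include <stdio.h>

    int main (void)
    {
        int a = 1;
        int b;                    /* hoisted: legal C89 */
        printf ("a = %d\n", a);   /* first statement of the block */
        /* writing "int b = a + 1;" here instead would trigger
           error C2143, a declaration after a statement */
        b = a + 1;                /* plain assignment instead */
        printf ("b = %d\n", b);
        return 0;
    }

If you just need a build right away and your copy of Makefile.win32
exposes it, disabling the MMX fast paths (something like MMX=off on the
make command line) should sidestep the file entirely, but the patch is
the proper fix.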
Thanks,
Dimiter 'malkia' Stanev
pixman-mmx.diff
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 74a5e87..9e597db 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -541,7 +541,7 @@ expand565 (__m64 pixel, int pos)
static force_inline void
expand_4xpacked565 (__m64 vin, __m64 *vout0, __m64 *vout1, int full_alpha)
{
- __m64 t0, t1, alpha = _mm_setzero_si64 ();;
+ __m64 t0, t1, alpha = _mm_setzero_si64 ();
__m64 r = _mm_and_si64 (vin, MC (expand_565_r));
__m64 g = _mm_and_si64 (vin, MC (expand_565_g));
__m64 b = _mm_and_si64 (vin, MC (expand_565_b));
@@ -1902,22 +1902,22 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
while (w >= 4)
{
__m64 vdest = *(__m64 *)dst;
- __m64 v0, v1, v2, v3;
+ __m64 v0, v1, v2, v3, vsrc0, vsrc1, vsrc2, vsrc3;
expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
- __m64 vsrc0 = load8888 ((src + 0));
- __m64 vsrc1 = load8888 ((src + 1));
- __m64 vsrc2 = load8888 ((src + 2));
- __m64 vsrc3 = load8888 ((src + 3));
-
+ vsrc0 = load8888 ((src + 0));
+ vsrc1 = load8888 ((src + 1));
+ vsrc2 = load8888 ((src + 2));
+ vsrc3 = load8888 ((src + 3));
+
v0 = over (vsrc0, expand_alpha (vsrc0), v0);
v1 = over (vsrc1, expand_alpha (vsrc1), v1);
v2 = over (vsrc2, expand_alpha (vsrc2), v2);
v3 = over (vsrc3, expand_alpha (vsrc3), v3);
-
+
*(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);
-
+
w -= 4;
dst += 4;
src += 4;
@@ -2454,22 +2454,22 @@ mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
else if (m0 | m1 | m2 | m3)
{
__m64 vdest = *(__m64 *)dst;
- __m64 v0, v1, v2, v3;
+ __m64 v0, v1, v2, v3, vm0, vm1, vm2, vm3;
expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0);
- __m64 vm0 = to_m64 (m0);
+ vm0 = to_m64 (m0);
v0 = in_over (vsrc, vsrca, expand_alpha_rev (vm0), v0);
-
- __m64 vm1 = to_m64 (m1);
+
+ vm1 = to_m64 (m1);
v1 = in_over (vsrc, vsrca, expand_alpha_rev (vm1), v1);
-
- __m64 vm2 = to_m64 (m2);
+
+ vm2 = to_m64 (m2);
v2 = in_over (vsrc, vsrca, expand_alpha_rev (vm2), v2);
-
- __m64 vm3 = to_m64 (m3);
+
+ vm3 = to_m64 (m3);
v3 = in_over (vsrc, vsrca, expand_alpha_rev (vm3), v3);
-
+
*(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);;
}
@@ -3545,32 +3545,35 @@ mmx_composite_over_reverse_n_8888 (pixman_implementation_t *imp,
#define BILINEAR_INTERPOLATE_ONE_PIXEL(pix) \
do { \
+ __m64 t_hi, t_lo, b_hi, b_lo, hi, lo; \
/* fetch 2x2 pixel block into 2 mmx registers */ \
__m64 t = ldq_u ((__m64 *)&src_top [pixman_fixed_to_int (vx)]); \
__m64 b = ldq_u ((__m64 *)&src_bottom [pixman_fixed_to_int (vx)]); \
vx += unit_x; \
/* vertical interpolation */ \
- __m64 t_hi = _mm_mullo_pi16 (_mm_unpackhi_pi8 (t, mm_zero), mm_wt); \
- __m64 t_lo = _mm_mullo_pi16 (_mm_unpacklo_pi8 (t, mm_zero), mm_wt); \
- __m64 b_hi = _mm_mullo_pi16 (_mm_unpackhi_pi8 (b, mm_zero), mm_wb); \
- __m64 b_lo = _mm_mullo_pi16 (_mm_unpacklo_pi8 (b, mm_zero), mm_wb); \
- __m64 hi = _mm_add_pi16 (t_hi, b_hi); \
- __m64 lo = _mm_add_pi16 (t_lo, b_lo); \
+ t_hi = _mm_mullo_pi16 (_mm_unpackhi_pi8 (t, mm_zero), mm_wt); \
+ t_lo = _mm_mullo_pi16 (_mm_unpacklo_pi8 (t, mm_zero), mm_wt); \
+ b_hi = _mm_mullo_pi16 (_mm_unpackhi_pi8 (b, mm_zero), mm_wb); \
+ b_lo = _mm_mullo_pi16 (_mm_unpacklo_pi8 (b, mm_zero), mm_wb); \
+ hi = _mm_add_pi16 (t_hi, b_hi); \
+ lo = _mm_add_pi16 (t_lo, b_lo); \
if (BILINEAR_INTERPOLATION_BITS < 8) \
{ \
+ __m64 p, q; \
/* calculate horizontal weights */ \
__m64 mm_wh = _mm_add_pi16 (mm_addc7, _mm_xor_si64 (mm_xorc7, \
_mm_srli_pi16 (mm_x, \
16 - BILINEAR_INTERPOLATION_BITS))); \
mm_x = _mm_add_pi16 (mm_x, mm_ux); \
/* horizontal interpolation */ \
- __m64 p = _mm_unpacklo_pi16 (lo, hi); \
- __m64 q = _mm_unpackhi_pi16 (lo, hi); \
+ p = _mm_unpacklo_pi16 (lo, hi); \
+ q = _mm_unpackhi_pi16 (lo, hi); \
lo = _mm_madd_pi16 (p, mm_wh); \
hi = _mm_madd_pi16 (q, mm_wh); \
} \
else \
{ \
+ __m64 mm_lo_lo, mm_lo_hi, mm_hi_lo, mm_hi_hi; \
/* calculate horizontal weights */ \
__m64 mm_wh_lo = _mm_sub_pi16 (mm_BSHIFT, _mm_srli_pi16 (mm_x, \
16 - BILINEAR_INTERPOLATION_BITS)); \
@@ -3578,10 +3581,10 @@ do { \
16 - BILINEAR_INTERPOLATION_BITS); \
mm_x = _mm_add_pi16 (mm_x, mm_ux); \
/* horizontal interpolation */ \
- __m64 mm_lo_lo = _mm_mullo_pi16 (lo, mm_wh_lo); \
- __m64 mm_lo_hi = _mm_mullo_pi16 (hi, mm_wh_hi); \
- __m64 mm_hi_lo = _mm_mulhi_pu16 (lo, mm_wh_lo); \
- __m64 mm_hi_hi = _mm_mulhi_pu16 (hi, mm_wh_hi); \
+ mm_lo_lo = _mm_mullo_pi16 (lo, mm_wh_lo); \
+ mm_lo_hi = _mm_mullo_pi16 (hi, mm_wh_hi); \
+ mm_hi_lo = _mm_mulhi_pu16 (lo, mm_wh_lo); \
+ mm_hi_hi = _mm_mulhi_pu16 (hi, mm_wh_hi); \
lo = _mm_add_pi32 (_mm_unpacklo_pi16 (mm_lo_lo, mm_hi_lo), \
_mm_unpacklo_pi16 (mm_lo_hi, mm_hi_hi)); \
hi = _mm_add_pi32 (_mm_unpackhi_pi16 (mm_lo_lo, mm_hi_lo), \
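
One detail on the last hunk: BILINEAR_INTERPOLATE_ONE_PIXEL is a
multi-line statement macro, so the hoisted declarations have to sit
right at the top of the do { ... } while (0) block, and every line of
the macro must keep its trailing backslash. A toy macro showing the
same shape (names made up, not from pixman):

    #include <stdio.h>

    /* Same C89 rule inside a statement macro: declarations first,   */
    /* and every line keeps its backslash continuation.              */
    #define AVERAGE2(x, y, out)                                      \
    do {                                                             \
        int sum_;             /* declared before any statement */    \
        sum_ = (x) + (y);                                            \
        (out) = sum_ / 2;                                            \
    } while (0)

    int main (void)
    {
        int avg;
        AVERAGE2 (6, 10, avg);
        printf ("avg = %d\n", avg);   /* prints avg = 8 */
        return 0;
    }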