[cairo] Compiler warnings of type-punned pointers in fbmmx.c
Daniel Amelang
daniel.amelang at gmail.com
Sat Jan 20 20:38:11 PST 2007
On 1/20/07, Carl Worth <cworth at cworth.org> wrote:
> In fbmmx there is currently a raft of compiler warnings[*] about
> type-punned pointers. Can someone who is more intimate with the code than I
> am please clean it up?
I don't claim to be "intimate" with this code, but I do have a simple
fix that might be all that's needed. I see no change in "cairo test"
results nor "cairo perf" results, so I figure it's good enough to post
for review.
I'll push it out myself if I don't hear any complaints over the next few days.
Dan
(inlining the patch to keep gmail from base64 encoding it)
From 55478c3e89739299d1385df3210ff0c6b67f6a28 Mon Sep 17 00:00:00 2001
From: Dan Amelang <dan at amelang.net>
Date: Sat, 20 Jan 2007 18:34:09 -0800
Subject: [PATCH] Fix fbmmx compile warnings
Just changed each instance of "*((type*)(&data))" into "(type)data". No
changes in behavior (neither correctness nor performance) noted.
---
pixman/src/fbmmx.c | 45 +++++++++++++++++++++++++++++----------------
1 files changed, 29 insertions(+), 16 deletions(-)
diff --git a/pixman/src/fbmmx.c b/pixman/src/fbmmx.c
index aa18019..a4dca8a 100644
--- a/pixman/src/fbmmx.c
+++ b/pixman/src/fbmmx.c
@@ -1619,7 +1619,9 @@ fbCompositeSolidMask_nx8x8888mmx
(pixman_operator_t op,
if (m)
{
- __m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev
(*(__m64*)(&m)), load8888(*dst));
+ __m64 vdest = in_over (vsrc, vsrca,
+ expand_alpha_rev ((__m64) m),
+ load8888 (*dst));
*dst = store8888(vdest);
}
@@ -1647,8 +1649,13 @@ fbCompositeSolidMask_nx8x8888mmx
(pixman_operator_t op,
vdest = *(__m64 *)dst;
- dest0 = in_over(vsrc, vsrca, expand_alpha_rev (*(__m64*)(&m0)),
expand8888(vdest, 0));
- dest1 = in_over(vsrc, vsrca, expand_alpha_rev (*(__m64*)(&m1)),
expand8888(vdest, 1));
+ dest0 = in_over (vsrc, vsrca,
+ expand_alpha_rev ((__m64) m0),
+ expand8888 (vdest, 0));
+
+ dest1 = in_over (vsrc, vsrca,
+ expand_alpha_rev ((__m64) m1),
+ expand8888 (vdest, 1));
*(__m64 *)dst = pack8888(dest0, dest1);
}
@@ -1667,7 +1674,9 @@ fbCompositeSolidMask_nx8x8888mmx
(pixman_operator_t op,
if (m)
{
__m64 vdest = load8888(*dst);
- vdest = in_over(vsrc, vsrca, expand_alpha_rev (*(__m64*)(&m)), vdest);
+ vdest = in_over (vsrc, vsrca,
+ expand_alpha_rev ((__m64) m),
+ vdest);
*dst = store8888(vdest);
}
@@ -1737,7 +1746,7 @@ fbCompositeSolidMaskSrc_nx8x8888mmx
(pixman_operator_t op,
if (m)
{
- __m64 vdest = in(vsrc, expand_alpha_rev (*(__m64*)(&m)));
+ __m64 vdest = in (vsrc, expand_alpha_rev ((__m64) m));
*dst = store8888(vdest);
}
else
@@ -1769,8 +1778,8 @@ fbCompositeSolidMaskSrc_nx8x8888mmx
(pixman_operator_t op,
vdest = *(__m64 *)dst;
- dest0 = in(vsrc, expand_alpha_rev (*(__m64*)(&m0)));
- dest1 = in(vsrc, expand_alpha_rev (*(__m64*)(&m1)));
+ dest0 = in (vsrc, expand_alpha_rev ((__m64) m0));
+ dest1 = in (vsrc, expand_alpha_rev ((__m64) m1));
*(__m64 *)dst = pack8888(dest0, dest1);
}
@@ -1793,7 +1802,7 @@ fbCompositeSolidMaskSrc_nx8x8888mmx
(pixman_operator_t op,
if (m)
{
__m64 vdest = load8888(*dst);
- vdest = in(vsrc, expand_alpha_rev (*(__m64*)(&m)));
+ vdest = in (vsrc, expand_alpha_rev ((__m64) m));
*dst = store8888(vdest);
}
else
@@ -1848,7 +1857,7 @@ fbCompositeSolidMask_nx8x0565mmx
(pixman_operator_t op,
vsrca = expand_alpha (vsrc);
vsrc16 = pack565(vsrc, _mm_setzero_si64(), 0);
- src16 = *(ullong*)(&vsrc16);
+ src16 = (ullong) vsrc16;
srcsrcsrcsrc = (ullong)src16 << 48 | (ullong)src16 << 32 |
(ullong)src16 << 16 | (ullong)src16;
@@ -1870,7 +1879,9 @@ fbCompositeSolidMask_nx8x0565mmx
(pixman_operator_t op,
if (m)
{
__m64 vd = _mm_cvtsi32_si64 (*dst);
- __m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev
(*(__m64*)(&m)), expand565(vd, 0));
+ __m64 vdest = in_over (vsrc, vsrca,
+ expand_alpha_rev ((__m64) m),
+ expand565 (vd, 0));
*dst = _mm_cvtsi64_si32 (pack565(vdest, _mm_setzero_si64(), 0));
}
@@ -1900,13 +1911,13 @@ fbCompositeSolidMask_nx8x0565mmx
(pixman_operator_t op,
vdest = *(__m64 *)dst;
- vm0 = *(__m64*)(&m0);
+ vm0 = (__m64) m0;
vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm0),
expand565(vdest, 0)), vdest, 0);
- vm1 = *(__m64*)(&m1);
+ vm1 = (__m64) m1;
vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm1),
expand565(vdest, 1)), vdest, 1);
- vm2 = *(__m64*)(&m2);
+ vm2 = (__m64) m2;
vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm2),
expand565(vdest, 2)), vdest, 2);
- vm3 = *(__m64*)(&m3);
+ vm3 = (__m64) m3;
vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm3),
expand565(vdest, 3)), vdest, 3);
*(__m64 *)dst = vdest;
@@ -1926,7 +1937,9 @@ fbCompositeSolidMask_nx8x0565mmx
(pixman_operator_t op,
if (m)
{
__m64 vd = _mm_cvtsi32_si64 (*dst);
- __m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev
(*(__m64*)(&m)), expand565(vd, 0));
+ __m64 vdest = in_over (vsrc, vsrca,
+ expand_alpha_rev ((__m64) m),
+ expand565 (vd, 0));
*dst = _mm_cvtsi64_si32 (pack565(vdest, _mm_setzero_si64(), 0));
}
@@ -2657,7 +2670,7 @@ fbSolidFillmmx (FbPixels *pDraw,
}
fill = ((ullong)xor << 32) | xor;
- vfill = *(__m64*)&fill;
+ vfill = (__m64) fill;
while (height--)
{
--
1.4.4.2
More information about the cairo
mailing list