Diffstat (limited to 'src/freetype2/base/ftcalc.c')
-rw-r--r--  src/freetype2/base/ftcalc.c  217
1 file changed, 176 insertions(+), 41 deletions(-)
diff --git a/src/freetype2/base/ftcalc.c b/src/freetype2/base/ftcalc.c
index 63aed95..3892fab 100644
--- a/src/freetype2/base/ftcalc.c
+++ b/src/freetype2/base/ftcalc.c
@@ -4,7 +4,7 @@
/* */
/* Arithmetic computations (body). */
/* */
-/* Copyright 1996-2001, 2002, 2003, 2004, 2005, 2006 by */
+/* Copyright 1996-2001, 2002, 2003, 2004, 2005, 2006, 2008 by */
/* David Turner, Robert Wilhelm, and Werner Lemberg. */
/* */
/* This file is part of the FreeType project, and may only be used, */
@@ -33,10 +33,14 @@
#include <ft2build.h>
+#include FT_GLYPH_H
#include FT_INTERNAL_CALC_H
#include FT_INTERNAL_DEBUG_H
#include FT_INTERNAL_OBJECTS_H
+#ifdef FT_MULFIX_INLINED
+#undef FT_MulFix
+#endif
/* we need to define a 64-bits data type here */
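The new FT_GLYPH_H include pulls in the declarations of the FT_Matrix_Multiply and FT_Matrix_Invert functions added further down, and the #undef guards against a build configuration that remaps FT_MulFix to an inlined variant: this file still has to emit the real exported function. A rough sketch of the kind of setup the #undef exists for (the macro names below are illustrative, not FreeType's actual configuration):

    /* hypothetical config/header fragment -- for illustration only       */

    /* the build provides an inlined fixed-point multiply ...             */
    #define FT_MULFIX_INLINED  my_fast_mulfix

    /* ... and a public header forwards FT_MulFix calls to it             */
    #define FT_MulFix( a, b )  FT_MULFIX_INLINED( (a), (b) )

    /* ftcalc.c must then drop the macro again, or the definition         */
    /*   FT_MulFix( FT_Long a, FT_Long b ) { ... }                        */
    /* below would be macro-expanded before the compiler sees it          */
    #ifdef FT_MULFIX_INLINED
    #undef FT_MulFix
    #endif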
@@ -106,12 +110,12 @@
FT_EXPORT_DEF( FT_Int32 )
FT_Sqrt32( FT_Int32 x )
{
- FT_ULong val, root, newroot, mask;
+ FT_UInt32 val, root, newroot, mask;
root = 0;
- mask = 0x40000000L;
- val = (FT_ULong)x;
+ mask = (FT_UInt32)0x40000000UL;
+ val = (FT_UInt32)x;
do
{
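The locals change from FT_ULong to FT_UInt32 so the square-root loop works on exactly 32 bits even where long is 64 bits wide. For context, the shift-and-subtract integer square root that this function implements looks roughly like the following self-contained sketch (not the literal FreeType body, which the hunk truncates):

    #include <stdint.h>

    /* digit-by-digit (shift-and-subtract) integer square root; */
    /* returns floor(sqrt(x)) for non-negative x                */
    static uint32_t
    sqrt32_sketch( int32_t x )
    {
      uint32_t  val  = (uint32_t)x;
      uint32_t  root = 0;
      uint32_t  mask = 0x40000000UL;


      do
      {
        if ( root + mask <= val )
        {
          val  -= root + mask;
          root += mask << 1;
        }
        root >>= 1;
        mask >>= 2;
      } while ( mask != 0 );

      return root;
    }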
@@ -192,15 +196,33 @@
FT_MulFix( FT_Long a,
FT_Long b )
{
+#ifdef FT_MULFIX_ASSEMBLER
+
+ return FT_MULFIX_ASSEMBLER( a, b );
+
+#else
+
FT_Int s = 1;
FT_Long c;
- if ( a < 0 ) { a = -a; s = -1; }
- if ( b < 0 ) { b = -b; s = -s; }
+ if ( a < 0 )
+ {
+ a = -a;
+ s = -1;
+ }
+
+ if ( b < 0 )
+ {
+ b = -b;
+ s = -s;
+ }
c = (FT_Long)( ( (FT_Int64)a * b + 0x8000L ) >> 16 );
- return ( s > 0 ) ? c : -c ;
+
+ return ( s > 0 ) ? c : -c;
+
+#endif /* FT_MULFIX_ASSEMBLER */
}
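Apart from the new FT_MULFIX_ASSEMBLER escape hatch, the changes to this 64-bit variant of FT_MulFix are purely cosmetic. What the function computes is the product of two 16.16 fixed-point numbers, rounded to the nearest unit. A minimal standalone equivalent, assuming a native 64-bit integer type:

    #include <stdint.h>

    /* rounded 16.16 fixed-point multiply: result ~ a * b / 65536,      */
    /* with the magnitude rounded to nearest (halves away from zero)    */
    static int32_t
    mulfix_sketch( int32_t a, int32_t b )
    {
      int       negative = ( a < 0 ) != ( b < 0 );
      uint64_t  ua = (uint64_t)( a < 0 ? -(int64_t)a : a );
      uint64_t  ub = (uint64_t)( b < 0 ? -(int64_t)b : b );
      uint64_t  c  = ( ua * ub + 0x8000U ) >> 16;


      return negative ? -(int32_t)c : (int32_t)c;
    }

    /* example: 1.5 * 2.25 in 16.16 format                       */
    /* mulfix_sketch( 0x18000, 0x24000 ) == 0x36000  (== 3.375)  */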
@@ -340,6 +362,7 @@
long s;
+ /* XXX: this function does not allow 64-bit arguments */
if ( a == 0 || b == c )
return a;
@@ -355,12 +378,12 @@
FT_Int64 temp, temp2;
- ft_multo64( a, b, &temp );
+ ft_multo64( (FT_Int32)a, (FT_Int32)b, &temp );
temp2.hi = 0;
temp2.lo = (FT_UInt32)(c >> 1);
FT_Add64( &temp, &temp2, &temp );
- a = ft_div64by32( temp.hi, temp.lo, c );
+ a = ft_div64by32( temp.hi, temp.lo, (FT_Int32)c );
}
else
a = 0x7FFFFFFFL;
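The casts here (and the XXX comment in the previous hunk) make explicit that ft_multo64 and ft_div64by32 only take 32-bit operands. The contract of FT_MulDiv itself is a multiply-then-divide, rounded to nearest, that saturates on division by zero; a compact sketch of that contract using a native 64-bit intermediate:

    #include <stdint.h>

    /* multiply-then-divide with rounding: ~ (a * b + c/2) / c,         */
    /* computed on magnitudes with the sign restored at the end;        */
    /* c == 0 saturates to the largest positive value, as in the patch  */
    static int32_t
    muldiv_sketch( int32_t a, int32_t b, int32_t c )
    {
      int       negative = ( ( a < 0 ) != ( b < 0 ) ) != ( c < 0 );
      uint64_t  ua = (uint64_t)( a < 0 ? -(int64_t)a : a );
      uint64_t  ub = (uint64_t)( b < 0 ? -(int64_t)b : b );
      uint64_t  uc = (uint64_t)( c < 0 ? -(int64_t)c : c );
      uint64_t  q;


      if ( uc == 0 )
        q = 0x7FFFFFFFU;
      else
        q = ( ua * ub + uc / 2 ) / uc;

      return negative ? -(int32_t)q : (int32_t)q;
    }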
@@ -394,8 +417,8 @@
FT_Int64 temp;
- ft_multo64( a, b, &temp );
- a = ft_div64by32( temp.hi, temp.lo, c );
+ ft_multo64( (FT_Int32)a, (FT_Int32)b, &temp );
+ a = ft_div64by32( temp.hi, temp.lo, (FT_Int32)c );
}
else
a = 0x7FFFFFFFL;
@@ -412,31 +435,18 @@
FT_MulFix( FT_Long a,
FT_Long b )
{
- /* use inline assembly to speed up things a bit */
-
-#if defined( __GNUC__ ) && defined( i386 )
-
- FT_Long result;
+#ifdef FT_MULFIX_ASSEMBLER
+ return FT_MULFIX_ASSEMBLER( a, b );
- __asm__ __volatile__ (
- "imul %%edx\n"
- "movl %%edx, %%ecx\n"
- "sarl $31, %%ecx\n"
- "addl $0x8000, %%ecx\n"
- "addl %%ecx, %%eax\n"
- "adcl $0, %%edx\n"
- "shrl $16, %%eax\n"
- "shll $16, %%edx\n"
- "addl %%edx, %%eax\n"
- "mov %%eax, %0\n"
- : "=r"(result)
- : "a"(a), "d"(b)
- : "%ecx"
- );
- return result;
+#elif 0
-#elif 1
+ /*
+ * This code is nonportable. See comment below.
+ *
+ * However, on a platform where right-shift of a signed quantity fills
+ * the leftmost bits by copying the sign bit, it might be faster.
+ */
FT_Long sa, sb;
FT_ULong ua, ub;
@@ -445,6 +455,24 @@
if ( a == 0 || b == 0x10000L )
return a;
+ /*
+ * This is a clever way of converting a signed number `a' into its
+ * absolute value (stored back into `a') and its sign. The sign is
+ * stored in `sa'; 0 means `a' was positive or zero, and -1 means `a'
+ * was negative. (Similarly for `b' and `sb').
+ *
+ * Unfortunately, it doesn't work (at least not portably).
+ *
+ * It makes the assumption that right-shift on a negative signed value
+ * fills the leftmost bits by copying the sign bit. This is wrong.
+ * According to K&R 2nd ed, section `A7.8 Shift Operators' on page 206,
+ * the result of right-shift of a negative signed value is
+ * implementation-defined. At least one implementation fills the
+ * leftmost bits with 0s (i.e., it is exactly the same as an unsigned
+ * right shift). This means that when `a' is negative, `sa' ends up
+ * with the value 1 rather than -1. After that, everything else goes
+ * wrong.
+ */
sa = ( a >> ( sizeof ( a ) * 8 - 1 ) );
a = ( a ^ sa ) - sa;
sb = ( b >> ( sizeof ( b ) * 8 - 1 ) );
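The long comment added here explains why the branch is now disabled (`#elif 0` above): right-shifting a negative signed value is implementation-defined in C, so `sa' is not guaranteed to end up as -1. A small illustration of the trick and of a portable replacement, assuming 32-bit two's-complement integers:

    #include <stdint.h>

    /* the trick from the disabled branch: only works where right-shifting */
    /* a negative value replicates the sign bit (arithmetic shift)         */
    static int32_t
    abs_by_shift( int32_t a, int32_t *sign )
    {
      int32_t  sa = a >> 31;   /* -1 if negative, 0 otherwise -- not portable */


      *sign = sa;
      return ( a ^ sa ) - sa;  /* conditional two's-complement negation */
    }

    /* portable equivalent of the same idea */
    static int32_t
    abs_portable( int32_t a, int32_t *sign )
    {
      *sign = ( a < 0 ) ? -1 : 0;
      return ( a < 0 ) ? -a : a;
    }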
@@ -512,13 +540,14 @@
FT_UInt32 q;
- s = a; a = FT_ABS(a);
- s ^= b; b = FT_ABS(b);
+ /* XXX: this function does not allow 64-bit arguments */
+ s = (FT_Int32)a; a = FT_ABS( a );
+ s ^= (FT_Int32)b; b = FT_ABS( b );
if ( b == 0 )
{
/* check for division by 0 */
- q = 0x7FFFFFFFL;
+ q = (FT_UInt32)0x7FFFFFFFL;
}
else if ( ( a >> 16 ) == 0 )
{
@@ -535,7 +564,7 @@
temp2.hi = 0;
temp2.lo = (FT_UInt32)( b >> 1 );
FT_Add64( &temp, &temp2, &temp );
- q = ft_div64by32( temp.hi, temp.lo, b );
+ q = ft_div64by32( temp.hi, temp.lo, (FT_Int32)b );
}
return ( s < 0 ? -(FT_Int32)q : (FT_Int32)q );
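FT_DivFix follows the same pattern as FT_MulDiv above: the sign is collected by XOR-ing the two (now explicitly 32-bit) operands, the magnitudes are divided, and division by zero saturates to 0x7FFFFFFF. In plain 64-bit arithmetic the function amounts to roughly this sketch:

    #include <stdint.h>

    /* 16.16 fixed-point divide: result ~ (a << 16) / b */
    static int32_t
    divfix_sketch( int32_t a, int32_t b )
    {
      int       negative = ( a < 0 ) != ( b < 0 );
      uint64_t  ua = (uint64_t)( a < 0 ? -(int64_t)a : a );
      uint64_t  ub = (uint64_t)( b < 0 ? -(int64_t)b : b );
      uint64_t  q;


      if ( ub == 0 )
        q = 0x7FFFFFFFU;                     /* division by zero saturates */
      else
        q = ( ( ua << 16 ) + ub / 2 ) / ub;  /* round to nearest           */

      return negative ? -(int32_t)q : (int32_t)q;
    }

    /* example: 1.0 / 2.0 in 16.16 format                      */
    /* divfix_sketch( 0x10000, 0x20000 ) == 0x8000  (== 0.5)   */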
@@ -666,6 +695,110 @@
#endif /* FT_LONG64 */
+ /* documentation is in ftglyph.h */
+
+ FT_EXPORT_DEF( void )
+ FT_Matrix_Multiply( const FT_Matrix* a,
+ FT_Matrix *b )
+ {
+ FT_Fixed xx, xy, yx, yy;
+
+
+ if ( !a || !b )
+ return;
+
+ xx = FT_MulFix( a->xx, b->xx ) + FT_MulFix( a->xy, b->yx );
+ xy = FT_MulFix( a->xx, b->xy ) + FT_MulFix( a->xy, b->yy );
+ yx = FT_MulFix( a->yx, b->xx ) + FT_MulFix( a->yy, b->yx );
+ yy = FT_MulFix( a->yx, b->xy ) + FT_MulFix( a->yy, b->yy );
+
+ b->xx = xx; b->xy = xy;
+ b->yx = yx; b->yy = yy;
+ }
+
+
+ /* documentation is in ftglyph.h */
+
+ FT_EXPORT_DEF( FT_Error )
+ FT_Matrix_Invert( FT_Matrix* matrix )
+ {
+ FT_Pos delta, xx, yy;
+
+
+ if ( !matrix )
+ return FT_Err_Invalid_Argument;
+
+ /* compute discriminant */
+ delta = FT_MulFix( matrix->xx, matrix->yy ) -
+ FT_MulFix( matrix->xy, matrix->yx );
+
+ if ( !delta )
+ return FT_Err_Invalid_Argument; /* matrix can't be inverted */
+
+ matrix->xy = - FT_DivFix( matrix->xy, delta );
+ matrix->yx = - FT_DivFix( matrix->yx, delta );
+
+ xx = matrix->xx;
+ yy = matrix->yy;
+
+ matrix->xx = FT_DivFix( yy, delta );
+ matrix->yy = FT_DivFix( xx, delta );
+
+ return FT_Err_Ok;
+ }
+
+
+ /* documentation is in ftcalc.h */
+
+ FT_BASE_DEF( void )
+ FT_Matrix_Multiply_Scaled( const FT_Matrix* a,
+ FT_Matrix *b,
+ FT_Long scaling )
+ {
+ FT_Fixed xx, xy, yx, yy;
+
+ FT_Long val = 0x10000L * scaling;
+
+
+ if ( !a || !b )
+ return;
+
+ xx = FT_MulDiv( a->xx, b->xx, val ) + FT_MulDiv( a->xy, b->yx, val );
+ xy = FT_MulDiv( a->xx, b->xy, val ) + FT_MulDiv( a->xy, b->yy, val );
+ yx = FT_MulDiv( a->yx, b->xx, val ) + FT_MulDiv( a->yy, b->yx, val );
+ yy = FT_MulDiv( a->yx, b->xy, val ) + FT_MulDiv( a->yy, b->yy, val );
+
+ b->xx = xx; b->xy = xy;
+ b->yx = yx; b->yy = yy;
+ }
+
+
+ /* documentation is in ftcalc.h */
+
+ FT_BASE_DEF( void )
+ FT_Vector_Transform_Scaled( FT_Vector* vector,
+ const FT_Matrix* matrix,
+ FT_Long scaling )
+ {
+ FT_Pos xz, yz;
+
+ FT_Long val = 0x10000L * scaling;
+
+
+ if ( !vector || !matrix )
+ return;
+
+ xz = FT_MulDiv( vector->x, matrix->xx, val ) +
+ FT_MulDiv( vector->y, matrix->xy, val );
+
+ yz = FT_MulDiv( vector->x, matrix->yx, val ) +
+ FT_MulDiv( vector->y, matrix->yy, val );
+
+ vector->x = xz;
+ vector->y = yz;
+ }
+
+
/* documentation is in ftcalc.h */
FT_BASE_DEF( FT_Int32 )
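The new block adds two public helpers, FT_Matrix_Multiply (which stores a*b back into b) and FT_Matrix_Invert, plus two library-internal `_Scaled' variants that pass `0x10000 * scaling' to FT_MulDiv so an extra integer scale factor carried by the operands is divided back out. A small usage sketch of the public pair; it needs no FT_Library object and only assumes the headers named in the diff:

    #include <ft2build.h>
    #include FT_FREETYPE_H
    #include FT_GLYPH_H   /* declares FT_Matrix_Multiply / FT_Matrix_Invert */


    /* compose a 90-degree rotation with its own inverse; the product  */
    /* should come back as the identity matrix                         */
    int
    main( void )
    {
      FT_Matrix  rot, inv;


      rot.xx = 0;         rot.xy = -0x10000;    /* 16.16 fixed-point */
      rot.yx = 0x10000;   rot.yy = 0;

      inv = rot;
      if ( FT_Matrix_Invert( &inv ) )
        return 1;                               /* singular matrix */

      FT_Matrix_Multiply( &rot, &inv );         /* inv := rot * inv */

      /* inv is now { 0x10000, 0, 0, 0x10000 }, i.e. the identity */
      return 0;
    }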
@@ -709,7 +842,7 @@
FT_Pos out_x,
FT_Pos out_y )
{
- FT_Int result;
+ FT_Long result; /* avoid overflow on 16-bit system */
/* deal with the trivial cases quickly */
@@ -758,8 +891,9 @@
FT_Int64 z1, z2;
- ft_multo64( in_x, out_y, &z1 );
- ft_multo64( in_y, out_x, &z2 );
+ /* XXX: this function does not allow 64-bit arguments */
+ ft_multo64( (FT_Int32)in_x, (FT_Int32)out_y, &z1 );
+ ft_multo64( (FT_Int32)in_y, (FT_Int32)out_x, &z2 );
if ( z1.hi > z2.hi )
result = +1;
@@ -775,7 +909,8 @@
#endif
}
- return result;
+ /* XXX: only the sign of return value, +1/0/-1 must be used */
+ return (FT_Int)result;
}
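Widening `result' to FT_Long and casting the return value back to FT_Int matter on targets where int is 16 bits, and the new comment stresses that only the sign of the result is meaningful. The quantity being tested is the z component of the 2-D cross product of the `in' and `out' vectors; with a native 64-bit intermediate the whole function reduces to this sketch:

    #include <stdint.h>

    /* sign of the z component of (in x out): +1 and -1 encode the two   */
    /* turning directions (counter-clockwise vs. clockwise in a y-up     */
    /* coordinate system), 0 means the vectors are collinear             */
    static int
    corner_orientation_sketch( int32_t in_x, int32_t in_y,
                               int32_t out_x, int32_t out_y )
    {
      int64_t  cross = (int64_t)in_x * out_y - (int64_t)in_y * out_x;


      return ( cross > 0 ) - ( cross < 0 );
    }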