Convert tabs to spaces in files largely copied from elsewhere

This commit is contained in:
Eduardo Almeida
2022-09-06 17:29:34 +01:00
parent 3e39cb0724
commit 6854e62de8
3 changed files with 365 additions and 365 deletions

View File

@@ -26,7 +26,7 @@
* The Initial Developer of the Original Code is Keith Packard
*
* Contributor(s):
* Keith R. Packard <keithp@keithp.com>
* Keith R. Packard <keithp@keithp.com>
*
*/

View File

@@ -26,7 +26,7 @@
* The Initial Developer of the Original Code is Keith Packard
*
* Contributor(s):
* Keith R. Packard <keithp@keithp.com>
* Keith R. Packard <keithp@keithp.com>
*
* Code changes for ns-3 from upstream are marked with `//PDB'
*/
@@ -51,7 +51,7 @@ const char * cairo_impl64 = "uint64_t";
cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
cairo_uquorem64_t qr;
cairo_uquorem64_t qr;
qr.quo = num / den;
qr.rem = num % den;
@@ -65,7 +65,7 @@ const char * cairo_impl64 = "uint32_t";
cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
cairo_uint64_t q;
cairo_uint64_t q;
q.lo = i;
q.hi = 0;
@@ -75,7 +75,7 @@ _cairo_uint32_to_uint64 (uint32_t i)
cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
cairo_uint64_t q;
cairo_uint64_t q;
q.lo = i;
q.hi = i < 0 ? -1 : 0;
@@ -85,7 +85,7 @@ _cairo_int32_to_int64 (int32_t i)
static cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
cairo_uint64_t q;
cairo_uint64_t q;
q.lo = l;
q.hi = h;
@@ -95,38 +95,38 @@ _cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
cairo_uint64_t s;
cairo_uint64_t s;
s.hi = a.hi + b.hi;
s.lo = a.lo + b.lo;
if (s.lo < a.lo)
s.hi++;
s.hi++;
return s;
}
cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
cairo_uint64_t s;
cairo_uint64_t s;
s.hi = a.hi - b.hi;
s.lo = a.lo - b.lo;
if (s.lo > a.lo)
s.hi--;
s.hi--;
return s;
}
#define uint32_lo(i) ((i) & 0xffff)
#define uint32_hi(i) ((i) >> 16)
#define uint32_carry16 ((1) << 16)
#define uint32_lo(i) ((i) & 0xffff)
#define uint32_hi(i) ((i) >> 16)
#define uint32_carry16 ((1) << 16)
cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
cairo_uint64_t s;
uint16_t ah, al, bh, bl;
uint32_t r0, r1, r2, r3;
uint16_t ah, al, bh, bl;
uint32_t r0, r1, r2, r3;
al = uint32_lo (a);
ah = uint32_hi (a);
@@ -139,9 +139,9 @@ _cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
r3 = (uint32_t) ah * bh;
r1 += uint32_hi(r0); /* no carry possible */
r1 += r2; /* but this can carry */
if (r1 < r2) /* check */
r3 += uint32_carry16;
r1 += r2; /* but this can carry */
if (r1 < r2) /* check */
r3 += uint32_carry16;
s.hi = r3 + uint32_hi(r1);
s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
@@ -154,16 +154,16 @@ _cairo_int32x32_64_mul (int32_t a, int32_t b)
cairo_int64_t s;
s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
if (a < 0)
s.hi -= b;
s.hi -= b;
if (b < 0)
s.hi -= a;
s.hi -= a;
return s;
}
cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
cairo_uint64_t s;
cairo_uint64_t s;
s = _cairo_uint32x32_64_mul (a.lo, b.lo);
s.hi += a.lo * b.hi + a.hi * b.lo;
@@ -175,14 +175,14 @@ _cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
if (shift >= 32)
{
a.hi = a.lo;
a.lo = 0;
shift -= 32;
a.hi = a.lo;
a.lo = 0;
shift -= 32;
}
if (shift)
{
a.hi = a.hi << shift | a.lo >> (32 - shift);
a.lo = a.lo << shift;
a.hi = a.hi << shift | a.lo >> (32 - shift);
a.lo = a.lo << shift;
}
return a;
}
@@ -192,33 +192,33 @@ _cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
if (shift >= 32)
{
a.lo = a.hi;
a.hi = 0;
shift -= 32;
a.lo = a.hi;
a.hi = 0;
shift -= 32;
}
if (shift)
{
a.lo = a.lo >> shift | a.hi << (32 - shift);
a.hi = a.hi >> shift;
a.lo = a.lo >> shift | a.hi << (32 - shift);
a.hi = a.hi >> shift;
}
return a;
}
#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
if (shift >= 32)
{
a.lo = a.hi;
a.hi = _cairo_uint32_rsa (a.hi, 31);
shift -= 32;
a.lo = a.hi;
a.hi = _cairo_uint32_rsa (a.hi, 31);
shift -= 32;
}
if (shift)
{
a.lo = a.lo >> shift | a.hi << (32 - shift);
a.hi = _cairo_uint32_rsa (a.hi, shift);
a.lo = a.lo >> shift | a.hi << (32 - shift);
a.hi = _cairo_uint32_rsa (a.hi, shift);
}
return a;
}
@@ -227,7 +227,7 @@ int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
return (a.hi < b.hi ||
(a.hi == b.hi && a.lo < b.lo));
(a.hi == b.hi && a.lo < b.lo));
}
int
@@ -240,9 +240,9 @@ int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
return 1;
return 1;
if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
return 0;
return 0;
return _cairo_uint64_lt (a, b);
}
@@ -260,7 +260,7 @@ _cairo_uint64_negate (cairo_uint64_t a)
a.lo = ~a.lo;
a.hi = ~a.hi;
if (++a.lo == 0)
++a.hi;
++a.hi;
return a;
}
@@ -270,30 +270,30 @@ _cairo_uint64_negate (cairo_uint64_t a)
cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
cairo_uquorem64_t qr;
cairo_uint64_t bit;
cairo_uint64_t quo;
cairo_uquorem64_t qr;
cairo_uint64_t bit;
cairo_uint64_t quo;
bit = _cairo_uint32_to_uint64 (1);
/* normalize to make den >= num, but not overflow */
while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
{
bit = _cairo_uint64_lsl (bit, 1);
den = _cairo_uint64_lsl (den, 1);
bit = _cairo_uint64_lsl (bit, 1);
den = _cairo_uint64_lsl (den, 1);
}
quo = _cairo_uint32_to_uint64 (0);
/* generate quotient, one bit at a time */
while (bit.hi | bit.lo)
{
if (_cairo_uint64_le (den, num))
{
num = _cairo_uint64_sub (num, den);
quo = _cairo_uint64_add (quo, bit);
}
bit = _cairo_uint64_rsl (bit, 1);
den = _cairo_uint64_rsl (den, 1);
if (_cairo_uint64_le (den, num))
{
num = _cairo_uint64_sub (num, den);
quo = _cairo_uint64_add (quo, bit);
}
bit = _cairo_uint64_rsl (bit, 1);
den = _cairo_uint64_rsl (den, 1);
}
qr.quo = quo;
qr.rem = num;
@@ -305,24 +305,24 @@ _cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
cairo_quorem64_t
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
{
int num_neg = _cairo_int64_negative (num);
int den_neg = _cairo_int64_negative (den);
cairo_uquorem64_t uqr;
cairo_quorem64_t qr;
int num_neg = _cairo_int64_negative (num);
int den_neg = _cairo_int64_negative (den);
cairo_uquorem64_t uqr;
cairo_quorem64_t qr;
if (num_neg)
num = _cairo_int64_negate (num);
num = _cairo_int64_negate (num);
if (den_neg)
den = _cairo_int64_negate (den);
den = _cairo_int64_negate (den);
uqr = _cairo_uint64_divrem (num, den);
if (num_neg)
qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
else
qr.rem = uqr.rem;
qr.rem = uqr.rem;
if (num_neg != den_neg)
qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
else
qr.quo = (cairo_int64_t) uqr.quo;
qr.quo = (cairo_int64_t) uqr.quo;
return qr;
}
@@ -333,7 +333,7 @@ const char * cairo_impl128 = "uint128_t";
cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
cairo_uquorem128_t qr;
cairo_uquorem128_t qr;
qr.quo = num / den;
qr.rem = num % den;
@@ -347,7 +347,7 @@ const char * cairo_impl128 = "cairo_uint64_t";
cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
cairo_uint128_t q;
cairo_uint128_t q;
q.lo = _cairo_uint32_to_uint64 (i);
q.hi = _cairo_uint32_to_uint64 (0);
@@ -357,7 +357,7 @@ _cairo_uint32_to_uint128 (uint32_t i)
cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
cairo_int128_t q;
cairo_int128_t q;
q.lo = _cairo_int32_to_int64 (i);
q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
@@ -367,7 +367,7 @@ _cairo_int32_to_int128 (int32_t i)
cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
cairo_uint128_t q;
cairo_uint128_t q;
q.lo = i;
q.hi = _cairo_uint32_to_uint64 (0);
@@ -377,7 +377,7 @@ _cairo_uint64_to_uint128 (cairo_uint64_t i)
cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
cairo_int128_t q;
cairo_int128_t q;
q.lo = i;
q.hi = _cairo_int32_to_int64 (_cairo_int64_negative(i) ? -1 : 0);
@@ -387,40 +387,40 @@ _cairo_int64_to_int128 (cairo_int64_t i)
cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
cairo_uint128_t s;
cairo_uint128_t s;
s.hi = _cairo_uint64_add (a.hi, b.hi);
s.lo = _cairo_uint64_add (a.lo, b.lo);
if (_cairo_uint64_lt (s.lo, a.lo))
s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
return s;
}
cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
cairo_uint128_t s;
cairo_uint128_t s;
s.hi = _cairo_uint64_sub (a.hi, b.hi);
s.lo = _cairo_uint64_sub (a.lo, b.lo);
if (_cairo_uint64_gt (s.lo, a.lo))
s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64(1));
s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64(1));
return s;
}
#if HAVE_UINT64_T
#define uint64_lo32(i) ((i) & 0xffffffff)
#define uint64_hi32(i) ((i) >> 32)
#define uint64_lo(i) ((i) & 0xffffffff)
#define uint64_hi(i) ((i) >> 32)
#define uint64_lo32(i) ((i) & 0xffffffff)
#define uint64_hi32(i) ((i) >> 32)
#define uint64_lo(i) ((i) & 0xffffffff)
#define uint64_hi(i) ((i) >> 32)
#define uint64_shift32(i) ((i) << 32)
#define uint64_carry32 (((uint64_t) 1) << 32)
#define uint64_carry32 (((uint64_t) 1) << 32)
#else
#define uint64_lo32(i) ((i).lo)
#define uint64_hi32(i) ((i).hi)
#define uint64_lo32(i) ((i).lo)
#define uint64_hi32(i) ((i).hi)
static cairo_uint64_t
uint64_lo (cairo_uint64_t i)
@@ -459,9 +459,9 @@ static const cairo_uint64_t uint64_carry32 = { 0, 1 };
cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
cairo_uint128_t s;
uint32_t ah, al, bh, bl;
cairo_uint64_t r0, r1, r2, r3;
cairo_uint128_t s;
uint32_t ah, al, bh, bl;
cairo_uint64_t r0, r1, r2, r3;
al = uint64_lo32 (a);
ah = uint64_hi32 (a);
@@ -474,13 +474,13 @@ _cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
r3 = _cairo_uint32x32_64_mul (ah, bh);
r1 = _cairo_uint64_add (r1, uint64_hi (r0)); /* no carry possible */
r1 = _cairo_uint64_add (r1, r2); /* but this can carry */
if (_cairo_uint64_lt (r1, r2)) /* check */
r3 = _cairo_uint64_add (r3, uint64_carry32);
r1 = _cairo_uint64_add (r1, r2); /* but this can carry */
if (_cairo_uint64_lt (r1, r2)) /* check */
r3 = _cairo_uint64_add (r3, uint64_carry32);
s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
s.lo = _cairo_uint64_add (uint64_shift32 (r1),
uint64_lo (r0));
uint64_lo (r0));
return s;
}
@@ -489,26 +489,26 @@ _cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
{
cairo_int128_t s;
s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64(a),
_cairo_int64_to_uint64(b));
_cairo_int64_to_uint64(b));
if (_cairo_int64_negative (a))
s.hi = _cairo_uint64_sub (s.hi,
_cairo_int64_to_uint64 (b));
s.hi = _cairo_uint64_sub (s.hi,
_cairo_int64_to_uint64 (b));
if (_cairo_int64_negative (b))
s.hi = _cairo_uint64_sub (s.hi,
_cairo_int64_to_uint64 (a));
s.hi = _cairo_uint64_sub (s.hi,
_cairo_int64_to_uint64 (a));
return s;
}
cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
cairo_uint128_t s;
cairo_uint128_t s;
s = _cairo_uint64x64_128_mul (a.lo, b.lo);
s.hi = _cairo_uint64_add (s.hi,
_cairo_uint64_mul (a.lo, b.hi));
_cairo_uint64_mul (a.lo, b.hi));
s.hi = _cairo_uint64_add (s.hi,
_cairo_uint64_mul (a.hi, b.lo));
_cairo_uint64_mul (a.hi, b.lo));
return s;
}
@@ -517,15 +517,15 @@ _cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
if (shift >= 64)
{
a.hi = a.lo;
a.lo = _cairo_uint32_to_uint64 (0);
shift -= 64;
a.hi = a.lo;
a.lo = _cairo_uint32_to_uint64 (0);
shift -= 64;
}
if (shift)
{
a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
_cairo_uint64_rsl (a.lo, (64 - shift)));
a.lo = _cairo_uint64_lsl (a.lo, shift);
a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
_cairo_uint64_rsl (a.lo, (64 - shift)));
a.lo = _cairo_uint64_lsl (a.lo, shift);
}
return a;
}
@@ -535,15 +535,15 @@ _cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
if (shift >= 64)
{
a.lo = a.hi;
a.hi = _cairo_uint32_to_uint64 (0);
shift -= 64;
a.lo = a.hi;
a.hi = _cairo_uint32_to_uint64 (0);
shift -= 64;
}
if (shift)
{
a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
_cairo_uint64_lsl (a.hi, (64 - shift)));
a.hi = _cairo_uint64_rsl (a.hi, shift);
a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
_cairo_uint64_lsl (a.hi, (64 - shift)));
a.hi = _cairo_uint64_rsl (a.hi, shift);
}
return a;
}
@@ -553,15 +553,15 @@ _cairo_uint128_rsa (cairo_int128_t a, int shift)
{
if (shift >= 64)
{
a.lo = a.hi;
a.hi = _cairo_uint64_rsa (a.hi, 64-1);
shift -= 64;
a.lo = a.hi;
a.hi = _cairo_uint64_rsa (a.hi, 64-1);
shift -= 64;
}
if (shift)
{
a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
_cairo_uint64_lsl (a.hi, (64 - shift)));
a.hi = _cairo_uint64_rsa (a.hi, shift);
a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
_cairo_uint64_lsl (a.hi, (64 - shift)));
a.hi = _cairo_uint64_rsa (a.hi, shift);
}
return a;
}
@@ -570,17 +570,17 @@ int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
return (_cairo_uint64_lt (a.hi, b.hi) ||
(_cairo_uint64_eq (a.hi, b.hi) &&
_cairo_uint64_lt (a.lo, b.lo)));
(_cairo_uint64_eq (a.hi, b.hi) &&
_cairo_uint64_lt (a.lo, b.lo)));
}
int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
return 1;
return 1;
if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
return 0;
return 0;
return _cairo_uint128_lt (a, b);
}
@@ -588,7 +588,7 @@ int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
return (_cairo_uint64_eq (a.hi, b.hi) &&
_cairo_uint64_eq (a.lo, b.lo));
_cairo_uint64_eq (a.lo, b.lo));
}
#if HAVE_UINT64_T
@@ -600,30 +600,30 @@ _cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
cairo_uquorem128_t qr;
cairo_uint128_t bit;
cairo_uint128_t quo;
cairo_uquorem128_t qr;
cairo_uint128_t bit;
cairo_uint128_t quo;
bit = _cairo_uint32_to_uint128 (1);
/* normalize to make den >= num, but not overflow */
while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
{
bit = _cairo_uint128_lsl (bit, 1);
den = _cairo_uint128_lsl (den, 1);
bit = _cairo_uint128_lsl (bit, 1);
den = _cairo_uint128_lsl (den, 1);
}
quo = _cairo_uint32_to_uint128 (0);
/* generate quotient, one bit at a time */
while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128(0)))
{
if (_cairo_uint128_le (den, num))
{
num = _cairo_uint128_sub (num, den);
quo = _cairo_uint128_add (quo, bit);
}
bit = _cairo_uint128_rsl (bit, 1);
den = _cairo_uint128_rsl (den, 1);
if (_cairo_uint128_le (den, num))
{
num = _cairo_uint128_sub (num, den);
quo = _cairo_uint128_add (quo, bit);
}
bit = _cairo_uint128_rsl (bit, 1);
den = _cairo_uint128_rsl (den, 1);
}
qr.quo = quo;
qr.rem = num;
@@ -651,24 +651,24 @@ _cairo_uint128_not (cairo_uint128_t a)
cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
int num_neg = _cairo_int128_negative (num);
int den_neg = _cairo_int128_negative (den);
cairo_uquorem128_t uqr;
cairo_quorem128_t qr;
int num_neg = _cairo_int128_negative (num);
int den_neg = _cairo_int128_negative (den);
cairo_uquorem128_t uqr;
cairo_quorem128_t qr;
if (num_neg)
num = _cairo_int128_negate (num);
num = _cairo_int128_negate (num);
if (den_neg)
den = _cairo_int128_negate (den);
den = _cairo_int128_negate (den);
uqr = _cairo_uint128_divrem (num, den);
if (num_neg)
qr.rem = _cairo_int128_negate (uqr.rem);
qr.rem = _cairo_int128_negate (uqr.rem);
else
qr.rem = uqr.rem;
qr.rem = uqr.rem;
if (num_neg != den_neg)
qr.quo = _cairo_int128_negate (uqr.quo);
qr.quo = _cairo_int128_negate (uqr.quo);
else
qr.quo = uqr.quo;
qr.quo = uqr.quo;
return qr;
}
@@ -683,7 +683,7 @@ _cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
* non-zero. */
cairo_uquorem64_t
_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
cairo_uint64_t den)
cairo_uint64_t den)
{
cairo_uquorem64_t result;
cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);
@@ -699,94 +699,94 @@ _cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
/* Don't bother if the quotient is going to overflow. */
if (_cairo_uint64_ge (x, den)) {
return /* overflow */ result;
return /* overflow */ result;
}
if (_cairo_uint64_lt (x, B)) {
/* When the final quotient is known to fit in 32 bits, then
* num < 2^64 if and only if den < 2^32. */
return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
/* When the final quotient is known to fit in 32 bits, then
* num < 2^64 if and only if den < 2^32. */
return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
}
else {
/* Denominator is >= 2^32. the numerator is >= 2^64, and the
* division won't overflow: need two divrems. Write the
* numerator and denominator as
*
* num = xB + y x : 64 bits, y : 32 bits
* den = uB + v u, v : 32 bits
*/
uint32_t y = _cairo_uint128_to_uint32 (num);
uint32_t u = uint64_hi32 (den);
uint32_t v = _cairo_uint64_to_uint32 (den);
/* Denominator is >= 2^32. the numerator is >= 2^64, and the
* division won't overflow: need two divrems. Write the
* numerator and denominator as
*
* num = xB + y x : 64 bits, y : 32 bits
* den = uB + v u, v : 32 bits
*/
uint32_t y = _cairo_uint128_to_uint32 (num);
uint32_t u = uint64_hi32 (den);
uint32_t v = _cairo_uint64_to_uint32 (den);
/* Compute a lower bound approximate quotient of num/den
* from x/(u+1). Then we have
*
* x = q(u+1) + r ; q : 32 bits, r <= u : 32 bits.
*
* xB + y = q(u+1)B + (rB+y)
* = q(uB + B + v - v) + (rB+y)
* = q(uB + v) + qB - qv + (rB+y)
* = q(uB + v) + q(B-v) + (rB+y)
*
* The true quotient of num/den then is q plus the
* contribution of q(B-v) + (rB+y). The main contribution
* comes from the term q(B-v), with the term (rB+y) only
* contributing at most one part.
*
* The term q(B-v) must fit into 64 bits, since q fits into 32
* bits on account of being a lower bound to the true
* quotient, and as B-v <= 2^32, we may safely use a single
* 64/64 bit division to find its contribution. */
/* Compute a lower bound approximate quotient of num/den
* from x/(u+1). Then we have
*
* x = q(u+1) + r ; q : 32 bits, r <= u : 32 bits.
*
* xB + y = q(u+1)B + (rB+y)
* = q(uB + B + v - v) + (rB+y)
* = q(uB + v) + qB - qv + (rB+y)
* = q(uB + v) + q(B-v) + (rB+y)
*
* The true quotient of num/den then is q plus the
* contribution of q(B-v) + (rB+y). The main contribution
* comes from the term q(B-v), with the term (rB+y) only
* contributing at most one part.
*
* The term q(B-v) must fit into 64 bits, since q fits into 32
* bits on account of being a lower bound to the true
* quotient, and as B-v <= 2^32, we may safely use a single
* 64/64 bit division to find its contribution. */
cairo_uquorem64_t quorem;
cairo_uint64_t remainder; /* will contain final remainder */
uint32_t quotient; /* will contain final quotient. */
uint32_t q;
uint32_t r;
cairo_uquorem64_t quorem;
cairo_uint64_t remainder; /* will contain final remainder */
uint32_t quotient; /* will contain final quotient. */
uint32_t q;
uint32_t r;
/* Approximate quotient by dividing the high 64 bits of num by
* u+1. Watch out for overflow of u+1. */
if (u+1) {
quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
q = _cairo_uint64_to_uint32 (quorem.quo);
r = _cairo_uint64_to_uint32 (quorem.rem);
}
else {
q = uint64_hi32 (x);
r = _cairo_uint64_to_uint32 (x);
}
quotient = q;
/* Approximate quotient by dividing the high 64 bits of num by
* u+1. Watch out for overflow of u+1. */
if (u+1) {
quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
q = _cairo_uint64_to_uint32 (quorem.quo);
r = _cairo_uint64_to_uint32 (quorem.rem);
}
else {
q = uint64_hi32 (x);
r = _cairo_uint64_to_uint32 (x);
}
quotient = q;
/* Add the main term's contribution to quotient. Note B-v =
* -v as an uint32 (unless v = 0) */
if (v)
quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t)v), den); //PDB cast
else
quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
quotient += _cairo_uint64_to_uint32 (quorem.quo);
/* Add the main term's contribution to quotient. Note B-v =
* -v as an uint32 (unless v = 0) */
if (v)
quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t)v), den); //PDB cast
else
quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
quotient += _cairo_uint64_to_uint32 (quorem.quo);
/* Add the contribution of the subterm and start computing the
* true remainder. */
remainder = _cairo_uint32s_to_uint64 (r, y);
if (_cairo_uint64_ge (remainder, den)) {
remainder = _cairo_uint64_sub (remainder, den);
quotient++;
}
/* Add the contribution of the subterm and start computing the
* true remainder. */
remainder = _cairo_uint32s_to_uint64 (r, y);
if (_cairo_uint64_ge (remainder, den)) {
remainder = _cairo_uint64_sub (remainder, den);
quotient++;
}
/* Add the contribution of the main term's remainder. The
* funky test here checks that remainder + main_rem >= den,
* taking into account overflow of the addition. */
remainder = _cairo_uint64_add (remainder, quorem.rem);
if (_cairo_uint64_ge (remainder, den) ||
_cairo_uint64_lt (remainder, quorem.rem))
{
remainder = _cairo_uint64_sub (remainder, den);
quotient++;
}
/* Add the contribution of the main term's remainder. The
* funky test here checks that remainder + main_rem >= den,
* taking into account overflow of the addition. */
remainder = _cairo_uint64_add (remainder, quorem.rem);
if (_cairo_uint64_ge (remainder, den) ||
_cairo_uint64_lt (remainder, quorem.rem))
{
remainder = _cairo_uint64_sub (remainder, den);
quotient++;
}
result.quo = _cairo_uint32_to_uint64 (quotient);
result.rem = remainder;
result.quo = _cairo_uint32_to_uint64 (quotient);
result.rem = remainder;
}
return result;
}
@@ -794,35 +794,35 @@ _cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
cairo_quorem64_t
_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
{
int num_neg = _cairo_int128_negative (num);
int den_neg = _cairo_int64_negative (den);
cairo_uint64_t nonneg_den;
cairo_uquorem64_t uqr;
cairo_quorem64_t qr;
int num_neg = _cairo_int128_negative (num);
int den_neg = _cairo_int64_negative (den);
cairo_uint64_t nonneg_den;
cairo_uquorem64_t uqr;
cairo_quorem64_t qr;
if (num_neg)
num = _cairo_int128_negate (num);
num = _cairo_int128_negate (num);
if (den_neg)
nonneg_den = _cairo_int64_negate (den);
nonneg_den = _cairo_int64_negate (den);
else
nonneg_den = den;
nonneg_den = den;
uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
/* bail on overflow. */
qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX); //PDB cast
qr.rem = den;
return qr;
/* bail on overflow. */
qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX); //PDB cast
qr.rem = den;
return qr;
}
if (num_neg)
qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
else
qr.rem = uqr.rem;
qr.rem = uqr.rem;
if (num_neg != den_neg)
qr.quo = _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
qr.quo = _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
else
qr.quo = uqr.quo;
qr.quo = uqr.quo;
return qr;
}

View File

@@ -107,7 +107,7 @@ extern "C" {
***
*
* NOTE: The FNV-0 historic hash is not recommended. One should use
* the FNV-1 hash instead.
* the FNV-1 hash instead.
*
* To use the 32 bit FNV-0 historic hash, pass FNV0_32_INIT as the
* Fnv32_t hashval argument to fnv_32_buf() or fnv_32_str().
@@ -140,10 +140,10 @@ extern "C" {
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
* Share and Enjoy! :-)
*/
#if !defined(__FNV_H__)
@@ -153,7 +153,7 @@ extern "C" {
//#include <sys/types.h> //PDB
#define FNV_VERSION "5.0.2" /**< @(#) FNV Version */
#define FNV_VERSION "5.0.2" /**< @(#) FNV Version */
/**
@@ -254,13 +254,13 @@ extern const Fnv64_t fnv1a_64_init;
* FNV hash types
*/
enum fnv_type {
FNV_NONE = 0, /**< invalid FNV hash type */
FNV0_32 = 1, /**< FNV-0 32 bit hash */
FNV1_32 = 2, /**< FNV-1 32 bit hash */
FNV1a_32 = 3, /**< FNV-1a 32 bit hash */
FNV0_64 = 4, /**< FNV-0 64 bit hash */
FNV1_64 = 5, /**< FNV-1 64 bit hash */
FNV1a_64 = 6, /**< FNV-1a 64 bit hash */
FNV_NONE = 0, /**< invalid FNV hash type */
FNV0_32 = 1, /**< FNV-0 32 bit hash */
FNV1_32 = 2, /**< FNV-1 32 bit hash */
FNV1a_32 = 3, /**< FNV-1a 32 bit hash */
FNV0_64 = 4, /**< FNV-0 64 bit hash */
FNV1_64 = 5, /**< FNV-1 64 bit hash */
FNV1a_64 = 6, /**< FNV-1a 64 bit hash */
};
//PDB test vector declarations deleted
@@ -342,10 +342,10 @@ enum fnv_type {
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
* Share and Enjoy! :-)
*/
//#include <stdlib.h> //PDB
@@ -362,34 +362,34 @@ enum fnv_type {
* fnv_32a_buf - perform a 32 bit Fowler/Noll/Vo FNV-1a hash on a buffer
*
* input:
* \param [in] buf start of buffer to hash
* \param [in] len length of buffer in octets
* \param [in] hval previous hash value or 0 if first call
* \param [in] buf start of buffer to hash
* \param [in] len length of buffer in octets
* \param [in] hval previous hash value or 0 if first call
*
* \returns 32 bit hash as a static hash type.
* \returns 32 bit hash as a static hash type.
*
* \note To use the recommended 32 bit FNV-1a hash, use FNV1_32A_INIT as the
* hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
* hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
*/
Fnv32_t
fnv_32a_buf(void *buf, size_t len, Fnv32_t hval)
{
unsigned char *bp = (unsigned char *)buf; /* start of buffer */
unsigned char *be = bp + len; /* beyond end of buffer */
unsigned char *bp = (unsigned char *)buf; /* start of buffer */
unsigned char *be = bp + len; /* beyond end of buffer */
/*
* FNV-1a hash each octet in the buffer
*/
while (bp < be) {
/* xor the bottom with the current octet */
hval ^= (Fnv32_t)*bp++;
/* xor the bottom with the current octet */
hval ^= (Fnv32_t)*bp++;
/* multiply by the 32 bit FNV magic prime mod 2^32 */
/* multiply by the 32 bit FNV magic prime mod 2^32 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
hval *= FNV_32_PRIME;
hval *= FNV_32_PRIME;
#else
hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
#endif
}
@@ -402,32 +402,32 @@ fnv_32a_buf(void *buf, size_t len, Fnv32_t hval)
* fnv_32a_str - perform a 32 bit Fowler/Noll/Vo FNV-1a hash on a string
*
* input:
* \param [in] str string to hash
* \param [in] hval previous hash value or 0 if first call
* \param [in] str string to hash
* \param [in] hval previous hash value or 0 if first call
*
* \returns 32 bit hash as a static hash type
* \returns 32 bit hash as a static hash type
*
* \note To use the recommended 32 bit FNV-1a hash, use FNV1_32A_INIT as the
* hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
* hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
*/
Fnv32_t
fnv_32a_str(char *str, Fnv32_t hval)
{
unsigned char *s = (unsigned char *)str; /* unsigned string */
unsigned char *s = (unsigned char *)str; /* unsigned string */
/*
* FNV-1a hash each octet in the buffer
*/
while (*s) {
/* xor the bottom with the current octet */
hval ^= (Fnv32_t)*s++;
/* xor the bottom with the current octet */
hval ^= (Fnv32_t)*s++;
/* multiply by the 32 bit FNV magic prime mod 2^32 */
/* multiply by the 32 bit FNV magic prime mod 2^32 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
hval *= FNV_32_PRIME;
hval *= FNV_32_PRIME;
#else
hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
#endif
}
@@ -486,10 +486,10 @@ fnv_32a_str(char *str, Fnv32_t hval)
* PERFORMANCE OF THIS SOFTWARE.
*
* By:
* chongo <Landon Curt Noll> /\oo/\
* chongo <Landon Curt Noll> /\oo/\
* http://www.isthe.com/chongo/
*
* Share and Enjoy! :-)
* Share and Enjoy! :-)
*/
//#include <stdlib.h> //PDB
@@ -511,8 +511,8 @@ const Fnv64_t fnv1a_64_init = { 0x84222325, 0xcbf29ce4 };
#if defined(HAVE_64BIT_LONG_LONG)
#define FNV_64_PRIME ((Fnv1aImplementation::Fnv64_t)0x100000001b3ULL)
#else /* HAVE_64BIT_LONG_LONG */
#define FNV_64_PRIME_LOW ((unsigned long)0x1b3) /* lower bits of FNV prime */
#define FNV_64_PRIME_SHIFT (8) /* top FNV prime shift above 2^32 */
#define FNV_64_PRIME_LOW ((unsigned long)0x1b3) /* lower bits of FNV prime */
#define FNV_64_PRIME_SHIFT (8) /* top FNV prime shift above 2^32 */
#endif /* HAVE_64BIT_LONG_LONG */
/**@}*/
@@ -521,20 +521,20 @@ const Fnv64_t fnv1a_64_init = { 0x84222325, 0xcbf29ce4 };
* fnv_64a_buf - perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer
*
* input:
* \param [in] buf start of buffer to hash
* \param [in] len length of buffer in octets
* \param [in] hval previous hash value or 0 if first call
* \param [in] buf start of buffer to hash
* \param [in] len length of buffer in octets
* \param [in] hval previous hash value or 0 if first call
*
* \returns 64 bit hash as a static hash type
* \returns 64 bit hash as a static hash type
*
* \note To use the recommended 64 bit FNV-1a hash, use FNV1A_64_INIT as the
* hval arg on the first call to either fnv_64a_buf() or fnv_64a_str().
* hval arg on the first call to either fnv_64a_buf() or fnv_64a_str().
*/
Fnv64_t
fnv_64a_buf(void *buf, size_t len, Fnv64_t hval)
{
unsigned char *bp = (unsigned char *)buf; /* start of buffer */
unsigned char *be = bp + len; /* beyond end of buffer */
unsigned char *bp = (unsigned char *)buf; /* start of buffer */
unsigned char *be = bp + len; /* beyond end of buffer */
#if defined(HAVE_64BIT_LONG_LONG)
/*
@@ -542,22 +542,22 @@ fnv_64a_buf(void *buf, size_t len, Fnv64_t hval)
*/
while (bp < be) {
/* xor the bottom with the current octet */
hval ^= (Fnv64_t)*bp++;
/* xor the bottom with the current octet */
hval ^= (Fnv64_t)*bp++;
/* multiply by the 64 bit FNV magic prime mod 2^64 */
/* multiply by the 64 bit FNV magic prime mod 2^64 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
hval *= FNV_64_PRIME;
hval *= FNV_64_PRIME;
#else /* NO_FNV_GCC_OPTIMIZATION */
hval += (hval << 1) + (hval << 4) + (hval << 5) +
(hval << 7) + (hval << 8) + (hval << 40);
hval += (hval << 1) + (hval << 4) + (hval << 5) +
(hval << 7) + (hval << 8) + (hval << 40);
#endif /* NO_FNV_GCC_OPTIMIZATION */
}
#else /* HAVE_64BIT_LONG_LONG */
unsigned long val[4]; /* hash value in base 2^16 */
unsigned long tmp[4]; /* tmp 64 bit value */
unsigned long val[4]; /* hash value in base 2^16 */
unsigned long tmp[4]; /* tmp 64 bit value */
/*
* Convert Fnv64_t hval into a base 2^16 array
@@ -574,40 +574,40 @@ fnv_64a_buf(void *buf, size_t len, Fnv64_t hval)
*/
while (bp < be) {
/* xor the bottom with the current octet */
val[0] ^= (unsigned long)*bp++;
/* xor the bottom with the current octet */
val[0] ^= (unsigned long)*bp++;
/*
* multiply by the 64 bit FNV magic prime mod 2^64
*
* Using 0x100000001b3 we have the following digits base 2^16:
*
* 0x0 0x100 0x0 0x1b3
*
* which is the same as:
*
* 0x0 1<<FNV_64_PRIME_SHIFT 0x0 FNV_64_PRIME_LOW
*/
/* multiply by the lowest order digit base 2^16 */
tmp[0] = val[0] * FNV_64_PRIME_LOW;
tmp[1] = val[1] * FNV_64_PRIME_LOW;
tmp[2] = val[2] * FNV_64_PRIME_LOW;
tmp[3] = val[3] * FNV_64_PRIME_LOW;
/* multiply by the other non-zero digit */
tmp[2] += val[0] << FNV_64_PRIME_SHIFT; /* tmp[2] += val[0] * 0x100 */
tmp[3] += val[1] << FNV_64_PRIME_SHIFT; /* tmp[3] += val[1] * 0x100 */
/* propagate carries */
tmp[1] += (tmp[0] >> 16);
val[0] = tmp[0] & 0xffff;
tmp[2] += (tmp[1] >> 16);
val[1] = tmp[1] & 0xffff;
val[3] = tmp[3] + (tmp[2] >> 16);
val[2] = tmp[2] & 0xffff;
/*
* Doing a val[3] &= 0xffff; is not really needed since it simply
* removes multiples of 2^64. We can discard these excess bits
* outside of the loop when we convert to Fnv64_t.
*/
/*
* multiply by the 64 bit FNV magic prime mod 2^64
*
* Using 0x100000001b3 we have the following digits base 2^16:
*
* 0x0 0x100 0x0 0x1b3
*
* which is the same as:
*
* 0x0 1<<FNV_64_PRIME_SHIFT 0x0 FNV_64_PRIME_LOW
*/
/* multiply by the lowest order digit base 2^16 */
tmp[0] = val[0] * FNV_64_PRIME_LOW;
tmp[1] = val[1] * FNV_64_PRIME_LOW;
tmp[2] = val[2] * FNV_64_PRIME_LOW;
tmp[3] = val[3] * FNV_64_PRIME_LOW;
/* multiply by the other non-zero digit */
tmp[2] += val[0] << FNV_64_PRIME_SHIFT; /* tmp[2] += val[0] * 0x100 */
tmp[3] += val[1] << FNV_64_PRIME_SHIFT; /* tmp[3] += val[1] * 0x100 */
/* propagate carries */
tmp[1] += (tmp[0] >> 16);
val[0] = tmp[0] & 0xffff;
tmp[2] += (tmp[1] >> 16);
val[1] = tmp[1] & 0xffff;
val[3] = tmp[3] + (tmp[2] >> 16);
val[2] = tmp[2] & 0xffff;
/*
* Doing a val[3] &= 0xffff; is not really needed since it simply
* removes multiples of 2^64. We can discard these excess bits
* outside of the loop when we convert to Fnv64_t.
*/
}
/*
 * fnv_64a_str - perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a string
 *
 * input:
 * \param [in] str string to hash
 * \param [in] hval previous hash value or 0 if first call
 *
 * \returns 64 bit hash as a static hash type
 *
 * \note To use the recommended 64 bit FNV-1a hash, use FNV1A_64_INIT as the
 * hval arg on the first call to either fnv_64a_buf() or fnv_64a_str().
 */
Fnv64_t
fnv_64a_str(char *str, Fnv64_t hval)
{
unsigned char *s = (unsigned char *)str; /* unsigned string */
unsigned char *s = (unsigned char *)str; /* unsigned string */
#if defined(HAVE_64BIT_LONG_LONG)
*/
while (*s) {
/* xor the bottom with the current octet */
hval ^= (Fnv64_t)*s++;
/* xor the bottom with the current octet */
hval ^= (Fnv64_t)*s++;
/* multiply by the 64 bit FNV magic prime mod 2^64 */
/* multiply by the 64 bit FNV magic prime mod 2^64 */
#if defined(NO_FNV_GCC_OPTIMIZATION)
hval *= FNV_64_PRIME;
hval *= FNV_64_PRIME;
#else /* NO_FNV_GCC_OPTIMIZATION */
hval += (hval << 1) + (hval << 4) + (hval << 5) +
(hval << 7) + (hval << 8) + (hval << 40);
hval += (hval << 1) + (hval << 4) + (hval << 5) +
(hval << 7) + (hval << 8) + (hval << 40);
#endif /* NO_FNV_GCC_OPTIMIZATION */
}
#else /* !HAVE_64BIT_LONG_LONG */
unsigned long val[4]; /* hash value in base 2^16 */
unsigned long tmp[4]; /* tmp 64 bit value */
unsigned long val[4]; /* hash value in base 2^16 */
unsigned long tmp[4]; /* tmp 64 bit value */
/*
* Convert Fnv64_t hval into a base 2^16 array
*/
while (*s) {
/* xor the bottom with the current octet */
/* xor the bottom with the current octet */
/*
* multiply by the 64 bit FNV magic prime mod 2^64
*
* Using 1099511628211, we have the following digits base 2^16:
*
* 0x0 0x100 0x0 0x1b3
*
* which is the same as:
*
* 0x0 1<<FNV_64_PRIME_SHIFT 0x0 FNV_64_PRIME_LOW
*/
/* multiply by the lowest order digit base 2^16 */
tmp[0] = val[0] * FNV_64_PRIME_LOW;
tmp[1] = val[1] * FNV_64_PRIME_LOW;
tmp[2] = val[2] * FNV_64_PRIME_LOW;
tmp[3] = val[3] * FNV_64_PRIME_LOW;
/* multiply by the other non-zero digit */
tmp[2] += val[0] << FNV_64_PRIME_SHIFT; /* tmp[2] += val[0] * 0x100 */
tmp[3] += val[1] << FNV_64_PRIME_SHIFT; /* tmp[3] += val[1] * 0x100 */
/* propagate carries */
tmp[1] += (tmp[0] >> 16);
val[0] = tmp[0] & 0xffff;
tmp[2] += (tmp[1] >> 16);
val[1] = tmp[1] & 0xffff;
val[3] = tmp[3] + (tmp[2] >> 16);
val[2] = tmp[2] & 0xffff;
/*
* Doing a val[3] &= 0xffff; is not really needed since it simply
* removes multiples of 2^64. We can discard these excess bits
* outside of the loop when we convert to Fnv64_t.
*/
val[0] ^= (unsigned long)(*s++);
/*
* multiply by the 64 bit FNV magic prime mod 2^64
*
* Using 1099511628211, we have the following digits base 2^16:
*
* 0x0 0x100 0x0 0x1b3
*
* which is the same as:
*
* 0x0 1<<FNV_64_PRIME_SHIFT 0x0 FNV_64_PRIME_LOW
*/
/* multiply by the lowest order digit base 2^16 */
tmp[0] = val[0] * FNV_64_PRIME_LOW;
tmp[1] = val[1] * FNV_64_PRIME_LOW;
tmp[2] = val[2] * FNV_64_PRIME_LOW;
tmp[3] = val[3] * FNV_64_PRIME_LOW;
/* multiply by the other non-zero digit */
tmp[2] += val[0] << FNV_64_PRIME_SHIFT; /* tmp[2] += val[0] * 0x100 */
tmp[3] += val[1] << FNV_64_PRIME_SHIFT; /* tmp[3] += val[1] * 0x100 */
/* propagate carries */
tmp[1] += (tmp[0] >> 16);
val[0] = tmp[0] & 0xffff;
tmp[2] += (tmp[1] >> 16);
val[1] = tmp[1] & 0xffff;
val[3] = tmp[3] + (tmp[2] >> 16);
val[2] = tmp[2] & 0xffff;
/*
* Doing a val[3] &= 0xffff; is not really needed since it simply
* removes multiples of 2^64. We can discard these excess bits
* outside of the loop when we convert to Fnv64_t.
*/
val[0] ^= (unsigned long)(*s++);
}
/*