Variable length integer operations

I intend to reference this thread from another thread, so as not to clutter that thread with code. Unless you're an admin, please do not post here.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
/* This code is relied on by some of the functions I will post */
typedef unsigned long long zxumax;
typedef signed   long long zxsmax;
typedef unsigned char zxuchr;
typedef signed   char zxschr;
#define ZXCORE_EXP
/* Unsigned division without the native '/' operator.
 * Divides *source by value in place: the quotient is stored back into
 * *source and the remainder is returned.
 * Conventions (kept from the original): division by zero yields
 * quotient 0 and remainder 0; a NULL source yields 0. */
zxumax zx_udiv( zxumax *source, zxumax value )
{
  zxumax num, quo = 0u, rem = 0u;
  int bit;
  /* Check the pointer BEFORE dereferencing it (the original read
   * *source first and also left its remainder uninitialized). */
  if ( !source )
    return 0u;
  num = *source;
  switch ( value )
  {
  case 0u:
    /* Defined result for division by zero: everything becomes 0. */
    *source = 0u;
    return 0u;
  case 1u:
    /* Quotient is the value itself (unchanged); remainder is 0. */
    return 0u;
  default:
    break;
  }
  /* Classic restoring shift-subtract division, MSB first. */
  for ( bit = (int)( sizeof( zxumax ) * CHAR_BIT ) - 1; bit >= 0; --bit )
  {
    rem = ( rem << 1 ) | ( ( num >> bit ) & 1u );
    quo <<= 1;
    if ( rem >= value )
    {
      rem -= value;
      quo |= 1u;
    }
  }
  *source = quo;
  return rem;
}
/* Signed division built on zx_udiv, truncating toward zero (C semantics):
 * the quotient is negative exactly when the operand signs differ, and the
 * remainder takes the dividend's sign.  Quotient is stored back into
 * *source, remainder is returned.
 * NOTE(review): negating the most negative zxsmax overflows — assumes
 * callers avoid that value; TODO confirm intended handling. */
zxsmax zx_sdiv( zxsmax* source, zxsmax value )
{
  zxsmax rem;
  bool sneg, vneg;
  if ( !source )
    return 0;
  sneg = ( *source < 0 );
  vneg = ( value < 0 );
  /* Work on magnitudes.  Arithmetic negation, not '~': the original's
   * one's complement was off by one in every negative case. */
  if ( sneg )
    *source = -( *source );
  if ( vneg )
    value = -value;
  rem = (zxsmax)zx_udiv( (zxumax*)source, (zxumax)value );
  /* Remainder follows the dividend's sign. */
  if ( sneg )
    rem = -rem;
  /* Quotient sign: negative only when exactly one operand was negative
   * (the original also negated when BOTH were negative). */
  if ( sneg != vneg )
    *source = -( *source );
  return rem;
}
Last edited on
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
typedef unsigned char zxuchr;
/* operator+=  */
/*
 * In-place bitwise addition: src += val.
 * Both buffers are little-endian bit arrays — bit k lives in byte
 * k/CHAR_BIT under mask 1 << (k % CHAR_BIT) — holding sbits and vbits
 * valid bits respectively.  Overflow past sbits is discarded
 * (fixed-width wrap).  Returns src; a NULL src or val is a no-op.
 */
zxuchr* zx_vluAddEq( zxuchr* src, size_t sbits, zxuchr* val, size_t vbits )
{
  zxuchr
    cBit = 0u,   /* pending-carry mask; nonzero = carry waiting at the current bit */
    vBit = 1u;   /* mask of the current bit within the current byte */
  size_t
    V = 0,       /* current byte index (shared by src and val; positions align) */
    v = 0,       /* current bit index */
    end = vbits;
  if ( !src || !val )
    return src;
  if ( sbits < vbits )
    end = sbits; /* never step past the shorter buffer */
  for ( ; v < end; ++v )
  {
    /* First fold in any carry pending at this bit position. */
    if ( cBit > 0u )
    {
      if ( src[ V ] & cBit )
        src[ V ] &= ~cBit;   /* 1 + carry -> 0, carry keeps propagating */
      else
      {
        src[ V ] |= cBit;    /* 0 + carry -> 1, carry absorbed */
        cBit = 0u;
      }
    }
    /* Then add the operand bit. */
    if ( val[ V ] & vBit )
    {
      if ( src[ V ] & vBit )
      {
        src[ V ] &= ~vBit;   /* 1 + 1 -> 0, raise a carry at this position */
        cBit = vBit;
      }
      else
        src[ V ] |= vBit;
    }
    vBit <<= 1u;
    if ( cBit > 0u )
    {
      /* Advance the pending carry to the next bit position in lockstep
       * with vBit, so both wrap at the byte boundary together. */
      cBit <<= 1u;
      if ( cBit == 0u )
        cBit = 1u;           /* wrapped past the top of the byte */
    }
    if ( vBit == 0u )
    {
      ++V;                   /* advance to the next byte */
      vBit = 1u;
    }
  }
  /* Ripple any remaining carry through the rest of src (val exhausted). */
  for ( ; ( v < sbits && cBit > 0u ); ++v )
  {
    if ( src[ V ] & cBit )
    {
      src[ V ] &= ~cBit;     /* carry keeps rippling upward */
      cBit <<= 1u;
      if ( cBit == 0u )
      {
        cBit = 1u;
        ++V;
      }
    }
    else
    {
      src[ V ] |= cBit;      /* carry absorbed; done */
      cBit = 0u;
    }
  }
  return src;
}
/* operator+   */
/* Non-destructive addition: returns a freshly malloc'd copy of src
 * (ceil(sbits/CHAR_BIT) bytes) with val added via zx_vluAddEq.
 * The caller owns and must free the result.
 * Returns NULL when src is missing/empty or allocation fails. */
zxuchr* zx_vluAdd( zxuchr* src, size_t sbits, zxuchr* val, size_t vbits )
{
  zxumax size = sbits;
  zxuchr* cpy = NULL;
  if ( src && size )
  {
    /* Convert a bit count to a byte count, rounding up. */
    if ( zx_udiv( &size, CHAR_BIT ) > 0 )
      ++size;
    cpy = (zxuchr*)malloc( (size_t)size );
    if ( !cpy )
      return NULL;  /* the original passed a NULL cpy straight to memcpy */
    memcpy( cpy, src, (size_t)size );
    return zx_vluAddEq( cpy, sbits, val, vbits );
  }
  return NULL;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
/* operator-= */
/* In-place subtraction src -= val via two's-complement addition:
 * -val == ~val + 1, and the carry discarded at the top of the
 * fixed-width buffer yields the correctly wrapped difference.
 * val is negated in place for the addition and then negated back,
 * leaving the caller's buffer unchanged on return. */
zxuchr* zx_vluRemEq( zxuchr* src, size_t sbits, zxuchr* val, size_t vbits )
{
  zxuchr add1 = 1u;
  /* Original guard was "|| val", which returned early whenever val was
   * valid and so never subtracted anything. */
  if ( !src || !val )
    return src;
  zx_vluNotEq( val, vbits );
  zx_vluAddEq( val, vbits, &add1, 1 );
  /*
    on growable binary this would end up with a leading bit
    but since our buffers are fixed in size the leading bit
    cannot be reached which gives us the correct result instead
  */
  zx_vluAddEq( src, sbits, val, vbits );
  /* Restore val: two's-complement negation is its own inverse. */
  zx_vluNotEq( val, vbits );
  zx_vluAddEq( val, vbits, &add1, 1 );
  return src;
}
/* operator- */
/* Non-destructive subtraction: returns a freshly malloc'd copy of src
 * with val subtracted via zx_vluRemEq.  Caller owns and frees the
 * result.  Returns NULL when src is missing/empty or allocation fails. */
zxuchr* zx_vluRem( zxuchr* src, size_t sbits, zxuchr* val, size_t vbits )
{
  zxumax size = sbits;
  zxuchr* cpy = NULL;
  if ( src && size )
  {
    /* Bits to bytes, rounding up. */
    if ( zx_udiv( &size, CHAR_BIT ) > 0 )
      ++size;
    cpy = (zxuchr*)malloc( (size_t)size );
    if ( !cpy )
      return NULL;  /* unchecked malloc previously fed NULL to memcpy */
    memcpy( cpy, src, (size_t)size );
    return zx_vluRemEq( cpy, sbits, val, vbits );
  }
  return NULL;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/* operator~=  */
/* In-place bitwise NOT of the first sbits bits of src.  Whole bytes are
 * inverted directly; a trailing partial byte is flipped only in its low
 * (in-range) bits, leaving bits beyond sbits untouched.  Returns src. */
zxuchr* zx_vluNotEq( zxuchr* src, size_t sbits )
{
  size_t i = 0, j = 0;
  zxuchr bit = 0u;
  if ( !src )
    return src;
  /* Invert every complete byte.  (The original looped on a 'stop'
   * counter it never decremented — an infinite loop for any
   * sbits >= CHAR_BIT.) */
  for ( ; i + CHAR_BIT <= sbits; i += CHAR_BIT, ++j )
    src[ j ] = ~src[ j ];
  if ( i < sbits )
  {
    /* Build a mask covering the remaining in-range bits. */
    for ( ; i < sbits; ++i )
    {
      bit <<= 1;
      bit |= 1u;
    }
    /* XOR flips only the masked bits; the original's ~(x & mask) also
     * forced every out-of-range bit to 1. */
    src[ j ] ^= bit;
  }
  return src;
}
/* operator~   */
/* Non-destructive NOT: returns a freshly malloc'd copy of src with its
 * first sbits bits inverted via zx_vluNotEq.  Caller frees the result.
 * Returns NULL when src is missing/empty or allocation fails. */
zxuchr* zx_vluNot( zxuchr* src, size_t sbits )
{
  zxumax size = sbits;
  zxuchr* cpy = NULL;
  if ( src && size )
  {
    /* Bits to bytes, rounding up. */
    if ( zx_udiv( &size, CHAR_BIT ) > 0 )
      ++size;
    cpy = (zxuchr*)malloc( (size_t)size );
    if ( !cpy )
      return NULL;  /* guard the allocation before memcpy uses it */
    memcpy( cpy, src, (size_t)size );
    return zx_vluNotEq( cpy, sbits );
  }
  return NULL;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/* operator<<= */
/* In-place left shift: src <<= by, where src is a little-endian bit
 * array of sbits valid bits (bit k = byte k/CHAR_BIT under mask
 * 1 << (k % CHAR_BIT)) — the same bit order zx_vluAddEq uses.
 * Bits shifted past sbits are discarded; vacated low bits become 0.
 * (Rewritten: the original walked the buffer with a target index that
 * was decremented in the main pass but incremented in the tail pass,
 * and never flushed its final partial byte.) */
ZXCORE_EXP zxuchr* zx_vluMvlEq( zxuchr* src, size_t sbits, size_t by )
{
  size_t i;
  if ( !src || !sbits || !by )
    return src;
  if ( by >= sbits )
  {
    /* Everything is shifted out: clear the whole buffer. */
    for ( i = 0; i < sbits; i += CHAR_BIT )
      src[ i / CHAR_BIT ] = 0u;
    return src;
  }
  /* Copy from high to low so each source bit is read before it can be
   * overwritten: result bit i takes source bit i - by. */
  for ( i = sbits; i-- > by; )
  {
    zxuchr fbit = (zxuchr)( 1u << ( ( i - by ) % CHAR_BIT ) );
    zxuchr tbit = (zxuchr)( 1u << ( i % CHAR_BIT ) );
    if ( src[ ( i - by ) / CHAR_BIT ] & fbit )
      src[ i / CHAR_BIT ] |= tbit;
    else
      src[ i / CHAR_BIT ] &= (zxuchr)~tbit;
  }
  /* Zero the vacated low bits. */
  for ( i = 0; i < by; ++i )
    src[ i / CHAR_BIT ] &= (zxuchr)~( 1u << ( i % CHAR_BIT ) );
  return src;
}
/* operator<<  */
/* Non-destructive left shift: returns a freshly malloc'd copy of src
 * shifted left by 'by' bits.  Caller owns and frees the result.
 * Signature fixed: zx_vluMvlEq takes a shift count, not a second
 * buffer — the original forwarded four arguments to a three-parameter
 * function, and the in-file caller (zx_vluMulEq) already passes a
 * count.  Returns NULL when src is missing/empty or malloc fails. */
ZXCORE_EXP zxuchr* zx_vluMvl( zxuchr* src, size_t sbits, size_t by )
{
  zxumax size = sbits;
  zxuchr* cpy = NULL;
  if ( src && size )
  {
    /* Bits to bytes, rounding up. */
    if ( zx_udiv( &size, CHAR_BIT ) > 0 )
      ++size;
    cpy = (zxuchr*)malloc( (size_t)size );
    if ( !cpy )
      return NULL;
    memcpy( cpy, src, (size_t)size );
    return zx_vluMvlEq( cpy, sbits, by );
  }
  return NULL;
}

Edit: Minor correction
Last edited on
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* operator>>= */
/* In-place logical right shift: src >>= by on a little-endian bit array
 * of sbits valid bits.  Vacated high bits become 0.
 * (Rewritten: the original accumulated its output in a staging byte it
 * never flushed at the end, leaving stale bits in a trailing partial
 * byte.) */
zxuchr* zx_vluMvrEq( zxuchr* src, size_t sbits, size_t by )
{
  size_t i;
  if ( !src || !sbits || !by )
    return src;
  if ( by >= sbits )
  {
    /* Everything is shifted out: clear the whole buffer. */
    for ( i = 0; i < sbits; i += CHAR_BIT )
      src[ i / CHAR_BIT ] = 0u;
    return src;
  }
  /* Copy from low to high — the read position stays ahead of the
   * write position: result bit i takes source bit i + by. */
  for ( i = 0; i + by < sbits; ++i )
  {
    zxuchr fbit = (zxuchr)( 1u << ( ( i + by ) % CHAR_BIT ) );
    zxuchr tbit = (zxuchr)( 1u << ( i % CHAR_BIT ) );
    if ( src[ ( i + by ) / CHAR_BIT ] & fbit )
      src[ i / CHAR_BIT ] |= tbit;
    else
      src[ i / CHAR_BIT ] &= (zxuchr)~tbit;
  }
  /* Zero the vacated high bits. */
  for ( ; i < sbits; ++i )
    src[ i / CHAR_BIT ] &= (zxuchr)~( 1u << ( i % CHAR_BIT ) );
  return src;
}
/* operator>>  */
/* Non-destructive right shift: returns a freshly malloc'd copy of src
 * shifted right by 'by' bits.  Caller owns and frees the result.
 * Signature fixed to match zx_vluMvrEq, which takes a shift count,
 * not a second buffer.  Returns NULL when src is missing/empty or
 * allocation fails. */
zxuchr* zx_vluMvr( zxuchr* src, size_t sbits, size_t by )
{
  zxumax size = sbits;
  zxuchr* cpy = NULL;
  if ( src && size )
  {
    /* Bits to bytes, rounding up. */
    if ( zx_udiv( &size, CHAR_BIT ) > 0 )
      ++size;
    cpy = (zxuchr*)malloc( (size_t)size );
    if ( !cpy )
      return NULL;
    memcpy( cpy, src, (size_t)size );
    return zx_vluMvrEq( cpy, sbits, by );
  }
  return NULL;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
/* Core equality test over little-endian byte buffers.  A NULL or
 * zero-length buffer, and any byte beyond a buffer's end, is treated as
 * zero — so buffers of different sizes compare equal when the overhang
 * is all zeros.  On inequality, *I (when non-NULL) receives the HIGHEST
 * byte index at which the operands differ; the relational helpers rely
 * on that to compare the deciding byte.
 * (Rewritten: the original's "i != pi" loops decremented both counters
 * in lockstep so the condition never fired; termination depended on
 * size_t wrap-around and indexed out of bounds.) */
bool zx__vluEe( zxuchr const *src, size_t ssize, zxuchr const *val, size_t vsize, size_t* I )
{
  size_t i, lo;
  if ( I )
    *I = 0;
  /* Normalize: a missing buffer is an empty (all-zero) one. */
  if ( !src )
    ssize = 0;
  if ( !val )
    vsize = 0;
  lo = ( ssize < vsize ) ? ssize : vsize;
  /* A nonzero byte in either buffer's overhang decides inequality. */
  for ( i = ssize; i-- > lo; )
  {
    if ( src[ i ] != 0u )
    {
      if ( I )
        *I = i;
      return false;
    }
  }
  for ( i = vsize; i-- > lo; )
  {
    if ( val[ i ] != 0u )
    {
      if ( I )
        *I = i;
      return false;
    }
  }
  /* Compare the shared range from the top down so *I is the highest
   * differing index. */
  for ( i = lo; i-- > 0; )
  {
    if ( src[ i ] != val[ i ] )
    {
      if ( I )
        *I = i;
      return false;
    }
  }
  return true;
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
/* unsigned == unsigned */
ZXCORE_EXP bool zx_vluEe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  return zx__vluEe( src, ssize, val, vsize, NULL );
}
/* signed == signed: equal signedness compares raw bit patterns, so the
 * shared helper applies directly. */
ZXCORE_EXP bool zx_vlsEe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  return zx__vluEe( src, ssize, val, vsize, NULL );
}
/* unsigned == signed: a negative val can never equal an unsigned src. */
ZXCORE_EXP bool zx_vlusEe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  /* Guard before reading the sign byte (was dereferenced unchecked). */
  if ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 )
    return false;
  return zx__vluEe( src, ssize, val, vsize, NULL );
}
/* signed == unsigned: mirror of the case above. */
ZXCORE_EXP bool zx_vlsuEe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  if ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 )
    return false;
  return zx__vluEe( src, ssize, val, vsize, NULL );
}
#if ZXCPP
/* C++ wrapper: dispatch on the operands' signedness flags. */
bool zx_Vli::operator==( zx__Vli val )
{
  if ( m_isSigned == val.m_isSigned )
    return zx__vluEe( m_buff, m_size, val.m_buff, val.m_size, NULL );
  if ( m_isSigned )
    return zx_vlsuEe( m_buff, m_size, val.m_buff, val.m_size );
  return zx_vlusEe( m_buff, m_size, val.m_buff, val.m_size );
}
#endif 
Last edited on
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
/* unsigned != unsigned */
ZXCORE_EXP bool zx_vluNe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  return !zx__vluEe( src, ssize, val, vsize, NULL );
}
/* signed != signed */
ZXCORE_EXP bool zx_vlsNe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  return !zx__vluEe( src, ssize, val, vsize, NULL );
}
/* unsigned != signed: a negative val is always unequal. */
ZXCORE_EXP bool zx_vlusNe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  /* Guard before reading the sign byte (was dereferenced unchecked). */
  if ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 )
    return true;
  return !zx__vluEe( src, ssize, val, vsize, NULL );
}
/* signed != unsigned: mirror of the case above. */
ZXCORE_EXP bool zx_vlsuNe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  if ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 )
    return true;
  return !zx__vluEe( src, ssize, val, vsize, NULL );
}
#if ZXCPP
/* C++ wrapper: dispatch on the operands' signedness flags. */
bool zx_Vli::operator!=( zx__Vli val )
{
  if ( m_isSigned == val.m_isSigned )
    return !zx__vluEe( m_buff, m_size, val.m_buff, val.m_size, NULL );
  if ( m_isSigned )
    return zx_vlsuNe( m_buff, m_size, val.m_buff, val.m_size );
  return zx_vlusNe( m_buff, m_size, val.m_buff, val.m_size );
}
#endif 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/* unsigned > unsigned.  zx__vluEe reports the highest differing byte in
 * i; a byte past a buffer's end (or a NULL buffer) counts as zero, so
 * differently sized operands compare correctly (the original returned
 * false whenever the deciding byte lay beyond the shorter buffer). */
ZXCORE_EXP bool zx_vluMt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
/* signed > signed.  When signs are equal, an unsigned compare of the
 * deciding byte orders two's-complement values correctly.
 * NOTE(review): for two negatives this assumes equal buffer widths
 * (sign extension is not applied) — TODO confirm. */
ZXCORE_EXP bool zx_vlsMt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  bool sneg = ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 );
  bool vneg = ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 );
  size_t i = 0;
  zxuchr sb, vb;
  if ( sneg != vneg )
    return vneg;             /* any non-negative > any negative */
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
/* unsigned > signed: any negative val loses immediately. */
ZXCORE_EXP bool zx_vlusMt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 )
    return true;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
/* signed > unsigned: a negative src can never be greater. */
ZXCORE_EXP bool zx_vlsuMt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 )
    return false;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
#if ZXCPP
/* C++ wrapper: dispatch on the operands' signedness flags. */
bool zx_Vli::operator>( zx__Vli val )
{
  if ( m_isSigned )
  {
    if ( val.m_isSigned )
      return zx_vlsMt( m_buff, m_size, val.m_buff, val.m_size );
    return zx_vlsuMt( m_buff, m_size, val.m_buff, val.m_size );
  }
  if ( val.m_isSigned )
    return zx_vlusMt( m_buff, m_size, val.m_buff, val.m_size );
  return zx_vluMt( m_buff, m_size, val.m_buff, val.m_size );
}
#endif 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/* unsigned >= unsigned: equality short-circuits true, otherwise the
 * highest differing byte decides; bytes past a buffer's end (or a NULL
 * buffer) count as zero. */
ZXCORE_EXP bool zx_vluMe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
/* signed >= signed.
 * NOTE(review): for two negatives this assumes equal buffer widths
 * (no sign extension across sizes) — TODO confirm. */
ZXCORE_EXP bool zx_vlsMe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  bool sneg = ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 );
  bool vneg = ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 );
  size_t i = 0;
  zxuchr sb, vb;
  if ( sneg != vneg )
    return vneg;             /* non-negative >= negative; not vice versa */
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
/* unsigned >= signed: any negative val loses immediately. */
ZXCORE_EXP bool zx_vlusMe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 )
    return true;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
/* signed >= unsigned: a negative src can never be >= an unsigned val. */
ZXCORE_EXP bool zx_vlsuMe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 )
    return false;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb > vb;
}
#if ZXCPP
/* C++ wrapper: dispatch on the operands' signedness flags. */
bool zx_Vli::operator>=( zx__Vli val )
{
  if ( m_isSigned )
  {
    if ( val.m_isSigned )
      return zx_vlsMe( m_buff, m_size, val.m_buff, val.m_size );
    return zx_vlsuMe( m_buff, m_size, val.m_buff, val.m_size );
  }
  if ( val.m_isSigned )
    return zx_vlusMe( m_buff, m_size, val.m_buff, val.m_size );
  return zx_vluMe( m_buff, m_size, val.m_buff, val.m_size );
}
#endif 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/* operator< family.  These were originally also named *Le, which
 * collided with the operator<= family defined below (duplicate symbol
 * definitions); renamed to *Lt following the existing Mt ('more than')
 * / Me ('more or equal') naming scheme.  operator< below is updated in
 * step. */
/* unsigned < unsigned */
ZXCORE_EXP bool zx_vluLt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  /* Bytes past a buffer's end (or a NULL buffer) count as zero. */
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
/* signed < signed.
 * NOTE(review): for two negatives this assumes equal buffer widths
 * (no sign extension across sizes) — TODO confirm. */
ZXCORE_EXP bool zx_vlsLt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  bool sneg = ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 );
  bool vneg = ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 );
  size_t i = 0;
  zxuchr sb, vb;
  if ( sneg != vneg )
    return sneg;             /* any negative < any non-negative */
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
/* unsigned < signed: never true against a negative val. */
ZXCORE_EXP bool zx_vlusLt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 )
    return false;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
/* signed < unsigned: a negative src is always smaller. */
ZXCORE_EXP bool zx_vlsuLt( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 )
    return true;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return false;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
#if ZXCPP
/* C++ wrapper: dispatch on the operands' signedness flags. */
bool zx_Vli::operator<( zx__Vli val )
{
  if ( m_isSigned )
  {
    if ( val.m_isSigned )
      return zx_vlsLt( m_buff, m_size, val.m_buff, val.m_size );
    return zx_vlsuLt( m_buff, m_size, val.m_buff, val.m_size );
  }
  if ( val.m_isSigned )
    return zx_vlusLt( m_buff, m_size, val.m_buff, val.m_size );
  return zx_vluLt( m_buff, m_size, val.m_buff, val.m_size );
}
#endif 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/* operator<= family.
 * unsigned <= unsigned: equality short-circuits true, otherwise the
 * highest differing byte decides; bytes past a buffer's end (or a NULL
 * buffer) count as zero. */
ZXCORE_EXP bool zx_vluLe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
/* signed <= signed.
 * NOTE(review): for two negatives this assumes equal buffer widths
 * (no sign extension across sizes) — TODO confirm. */
ZXCORE_EXP bool zx_vlsLe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  bool sneg = ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 );
  bool vneg = ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 );
  size_t i = 0;
  zxuchr sb, vb;
  if ( sneg != vneg )
    return sneg;             /* negative <= non-negative; not vice versa */
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
/* unsigned <= signed: never true against a negative val. */
ZXCORE_EXP bool zx_vlusLe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( val && vsize && (zxschr)val[ vsize - 1 ] < 0 )
    return false;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
/* signed <= unsigned: a negative src is always smaller. */
ZXCORE_EXP bool zx_vlsuLe( zxuchr* src, size_t ssize, zxuchr* val, size_t vsize )
{
  size_t i = 0;
  zxuchr sb, vb;
  if ( src && ssize && (zxschr)src[ ssize - 1 ] < 0 )
    return true;
  if ( zx__vluEe( src, ssize, val, vsize, &i ) )
    return true;
  sb = ( src && i < ssize ) ? src[ i ] : 0u;
  vb = ( val && i < vsize ) ? val[ i ] : 0u;
  return sb < vb;
}
#if ZXCPP
/* C++ wrapper: dispatch on the operands' signedness flags. */
bool zx_Vli::operator<=( zx__Vli val )
{
  if ( m_isSigned )
  {
    if ( val.m_isSigned )
      return zx_vlsLe( m_buff, m_size, val.m_buff, val.m_size );
    return zx_vlsuLe( m_buff, m_size, val.m_buff, val.m_size );
  }
  if ( val.m_isSigned )
    return zx_vlusLe( m_buff, m_size, val.m_buff, val.m_size );
  return zx_vluLe( m_buff, m_size, val.m_buff, val.m_size );
}
#endif 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
/* operator*= — shift-and-add multiplication over little-endian bit
 * arrays: for every set bit k of val, accumulate (src << k) into a
 * scratch buffer, then copy the result back into src.  Bits beyond
 * sbits are discarded (fixed-width wrap).
 * Returns src, or NULL on allocation failure.
 * (Rewritten: the original divided the bit counts by 2 instead of
 * CHAR_BIT, left malloc unchecked, derived the shift amount from src's
 * size rather than val's bit index, and used the broken "s != pi"
 * countdown idiom.) */
ZXCORE_EXP zxuchr* zx_vluMulEq( zxuchr *src, size_t sbits, zxuchr *val, size_t vbits )
{
  size_t k, I, sbytes;
  zxumax ssize = sbits;
  zxuchr *des = NULL, *tmp = NULL;
  if ( !src || !sbits )
    return src;
  if ( !val || !vbits )
  {
    /* Multiplication by zero clears the destination. */
    for ( k = 0, I = 0; k < sbits; k += CHAR_BIT, ++I )
      src[ I ] = 0u;
    return src;
  }
  /* Bits to bytes, rounding up. */
  if ( zx_udiv( &ssize, CHAR_BIT ) )
    ++ssize;
  sbytes = (size_t)ssize;
  des = (zxuchr*)malloc( sbytes );
  if ( !des )
    return NULL;
  for ( I = 0; I < sbytes; ++I )
    des[ I ] = 0u;
  for ( k = 0; k < vbits; ++k )
  {
    if ( val[ k / CHAR_BIT ] & ( 1u << ( k % CHAR_BIT ) ) )
    {
      /* des += src << k; zx_vluMvl allocates the shifted copy. */
      tmp = zx_vluMvl( src, sbits, k );
      if ( !tmp )
      {
        free( des );
        return NULL;
      }
      zx_vluAddEq( des, sbits, tmp, sbits );
      free( tmp );
    }
  }
  for ( I = 0; I < sbytes; ++I )
    src[ I ] = des[ I ];
  free( des );
  return src;
}
Topic archived. No new replies allowed.