gcc-68000 gnulib in *.s

Chuck Purcell cjp at SSESCO.com
Wed Mar 27 08:32:46 AEST 1991


I have seen three request for this type of information in the last month. I
prepared an assembly language version of the gnulib using gcc 1.35 on my 
ATARI-ST, now mostly in use at home as a Telebit T1000 driver. Use it if you
need it. Modify where necessary.

#!/bin/sh
# This is a shell archive (produced by shar 3.50)
# To extract the files from this archive, save it to a file, remove
# everything above the "!/bin/sh" line above, and type "sh file_name".
#
# made 01/27/1991 15:49 UTC by cjp at chasun
# Source directory /home/cjp/gcc.68000
#
# existing files will NOT be overwritten unless -c is specified
# This format requires very little intelligence at unshar time.
# "if test", "echo", "true", and "sed" may be needed.
#
# This shar contains:
# length  mode       name
# ------ ---------- ------------------------------------------
#   1789 -rw-rw-r-- MANIFEST
#  22635 -rw-rw-r-- dflonum.c
#  17998 -rw-rw-r-- dflonum.s
#    732 -rw-rw-r-- divsi3.s
#   4078 -rw-rw-r-- flonum.h
#   2224 -rw-rw-r-- frexp.c
#    922 -rw-rw-r-- frexp.s
#   7405 -rw-rw-r-- gnulib00.s
#   5014 -rw-rw-r-- gnulib30.s
#   4522 -rw-rw-r-- gnulib_.c
#    663 -rw-rw-r-- ldexp.c
#    360 -rw-rw-r-- ldexp.s
#   3348 -rw-rw-r-- modf.c
#   1450 -rw-rw-r-- modf.s
#    412 -rw-rw-r-- modsi3.s
#    734 -rw-rw-r-- mulsi3.s
#   6009 -rw-rw-r-- norm.c
#   1704 -rw-rw-r-- norm.s
#  15366 -rw-rw-r-- sflonum.s
#   1200 -rw-rw-r-- udivsi3.s
#    416 -rw-rw-r-- umodsi3.s
#    913 -rw-rw-r-- umulsi3.s
#
# ============= MANIFEST ==============
if test -f 'MANIFEST' -a X"$1" != X"-c"; then
	echo 'x - skipping MANIFEST (File already exists)'
else
echo 'x - extracting MANIFEST (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'MANIFEST' &&
Xtotal 218
Xdrwxrwxr-x   2 cjp      staff         512 Jan 27 09:33 ./
Xdrwxrwxr-x  18 cjp      staff        2048 Jan 27 09:27 ../
X-rw-rw-r--   1 cjp      staff           0 Jan 27 09:33 MANIFEST
X-rw-rw-r--   1 cjp      staff       22635 Jan 27 09:31 dflonum.c(1.35.bammi)
X-rw-rw-r--   1 cjp      staff ATARI 17998 Jan 27 09:32 dflonum.s(gcc-1.35 -S)
X-rw-rw-r--   1 cjp      staff         732 Jan 27 09:30 divsi3.s(1.35.bammi)
X-rw-rw-r--   1 cjp      staff        4078 Jan 27 09:32 flonum.h(1.35.bammi)
X-rw-rw-r--   1 cjp      staff        2224 Jan 27 09:31 frexp.c(1.35.bammi)
X-rw-rw-r--   1 cjp      staff ATARI   922 Jan 27 09:32 frexp.s(gcc-1.35 -S)
X-rw-rw-r--   1 cjp      staff ATARI  7405 Jan 27 09:29 gnulib00.s(Moto68000)
X-rw-rw-r--   1 cjp  staff  SUN 3/80  5014 Jan 27 09:29 gnulib30.s(Moto68030)
X-rw-rw-r--   1 cjp      staff        4522 Jan 27 09:29 gnulib_.c(gcc-1.34 C)
X-rw-rw-r--   1 cjp      staff         663 Jan 27 09:31 ldexp.c(1.35.bammi)
X-rw-rw-r--   1 cjp      staff ATARI   360 Jan 27 09:32 ldexp.s(gcc-1.35 -S)
X-rw-rw-r--   1 cjp      staff        3348 Jan 27 09:31 modf.c(1.35.bammi)
X-rw-rw-r--   1 cjp      staff ATARI  1450 Jan 27 09:32 modf.s(gcc-1.35 -S)
X-rw-rw-r--   1 cjp      staff         412 Jan 27 09:30 modsi3.s(1.35.bammi)
X-rw-rw-r--   1 cjp      staff         734 Jan 27 09:30 mulsi3.s(1.35.bammi)
X-rw-rw-r--   1 cjp      staff        6009 Jan 27 09:31 norm.c(1.35.bammi)
X-rw-rw-r--   1 cjp      staff ATARI  1704 Jan 27 09:32 norm.s(gcc-1.35 -S)
X-rw-rw-r--   1 cjp      staff       15366 Jan 27 09:30 sflonum.s(1.35.bammi)
X-rw-rw-r--   1 cjp      staff        1200 Jan 27 09:30 udivsi3.s(1.35.bammi)
X-rw-rw-r--   1 cjp      staff         416 Jan 27 09:30 umodsi3.s(1.35.bammi)
X-rw-rw-r--   1 cjp      staff         913 Jan 27 09:30 umulsi3.s(1.35.bammi)
SHAR_EOF
true || echo 'restore of MANIFEST failed'
fi
# ============= dflonum.c ==============
if test -f 'dflonum.c' -a X"$1" != X"-c"; then
	echo 'x - skipping dflonum.c (File already exists)'
else
echo 'x - extracting dflonum.c (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'dflonum.c' &&
X/* A working Gnulib68k, thanks to Scott McCauley for the origonal
X   version, and John Dunning (jrd at STONY-BROOK.SCRC.Symbolics.COM)
X   who got this working.
X   
X   Please not that only the double float. stuff is picked up from
X   here, the other long-int and single float stuff come from
X   fixnum.s and sflonum.s (see flonum.h for the appr. #defs).
X   ++jrb
X   */
X
X/* Subroutines needed by GCC output code for the 68000/20. 
X   
X   Compile using -O flag with gcc. 
X   Use the -m68000 flag if you have a 68000
X   
X   This package includes 32 bit integer and 64-bit floating
X   point routines.
X   
X   WARNING: alpha-test version (July 1988) -- probably full of bugs.
X   If you find a bug, send bugs reports to jsm at phoenix.princeton.edu,
X   or
X   
X   Scott McCauley
X   PPPL P. O. Box 451
X   Princeton NJ 08543
X   
X   Known bugs/features:
X   
X   1) Depending on the version of GCC, this may produce a lot of
X   warnings about conversion clashes, etc. It should still compile
X   as is. Also, there appears to be a bug in the register-allocation
X   parts of gcc that makes the compiler sometimes core dump
X   or run into an infinite loop. This version works -- if you
X   decide to change routines these compiler bugs can bite very hard....
X   
X   2) all single-precision gets converted to double precision in the
X   math routines (in accordance with K&R), so doing things in
X   double precision is faster than single precision....
X
X(not any more -- jrd )
X   
X   3) double precision floating point division may not be accurate to
X   the last four or so bits. Most other routines round to the
X   lsb.
X
X(not any more -- jrd )
X   
X   4) no support of NaN and Inf
X   
X   5) beware of undefined operations: i.e. a float to integer conversion
X   when the float is larger than MAXIINT.
X   
X   */
X
X#include "flonum.h"
X
X#ifdef __GCC_OLD__
X#define umultl _umulsi
X#define multl _mulsi3
X#define udivl _udivsi3 
X#define divl _divsi3
X#define ddiv _divdf3
X#define qmult _qmult
X#define dmult _muldf3
X#define dneg _negdf2
X#define dadd _adddf3
X#define dcmp _cmpdf2
X#define dtoul _fixunsdfsi
X#define dtol _fixdfsi
X#define ltod _floatsidf
X#else
X#define umultl __umulsi
X#define multl __mulsi3
X#define udivl __udivsi3 
X#define divl __divsi3
X#define ddiv __divdf3
X#define qmult __qmult
X#define dmult __muldf3
X#define dneg __negdf2
X#define dadd __adddf3
X#define dcmp __cmpdf2
X#define dtoul __fixunsdfsi
X#define dtol __fixdfsi
X#define ltod __floatsidf
X#endif
X
X#ifdef L_umulsi3
X
X/*_umulsi3 (a, b) unsigned a, b; { return a * b; } */
X
Xunsigned long umultl(a,b)
Xunsigned long a, b;
X{
X    register unsigned long d7, d6, d5, d4;
X    
X    d7 = a;
X    d6 = b;
X    d5 = d6;
X    d4 = d6;
X    /* without the next line, gcc may core dump. Gcc sometimes
X       gets confused if you have too many registers */
X    
X    &a; &b;
X    
X    /*printf("a %u b %u\n", a, b);*/
X    
X    /* low word */
X    MUL(d7, d6);
X    SWAP(d5);
X    MUL(d7, d5);
X    SWAP(d7);
X    MUL(d7, d4);
X    d4 += d5;
X    SWAP(d4);
X    d4 &= 0xFFFF0000;
X    d4 += d6;
X    return(d4);
X}
X#endif
X
X#ifdef L_mulsi3
X/* _mulsi3 (a, b) int a, b; { return a * b; } */
X
Xlong multl(a, b)
Xlong a, b;
X{
X    int sign = 0;
X    long umultl();
X    if ((a ^ b) < 0) sign = 1;
X    if (a < 0) a = -a;
X    if (b < 0) b = -b;
X    /*printf("a is %d b is %d\n", a, b);*/
X    if (sign) return(- umultl(a,b));
X    else return(umultl(a,b));
X}
X
X#endif
X
X#ifdef L_udivsi3
X/*_udivsi3 (a, b) unsigned a, b; { return a / b; } */
X
X/*
X  this routine based on one in the PD forth package for the sun by Mitch Bradley
X  */
X
Xunsigned udivl(u, v)
Xregister unsigned long u, v;
X{
X    register unsigned short um, ul, vm, vl;
X    unsigned long ans;
X    unsigned long u1, v1;
X    long i;
X    long rem;
X    
X    if (v == 0) {
X	/* should cause an exception condition */
X	DIV(u, v);
X	fprintf(stderr, "division by zero\n");
X    }
X    if (v > u) return(0);
X    
X    ul = u; SWAP(u); um = u;
X    vl = v; SWAP(v); vm = v;
X    if (vm == 0) {
X	u = vl; v = um;
X	DIV(u, v);
X	/* note -- if you delete the next line, gcc goes into
X	   an infinite loop */
X	if (vm) printf("result is %d\n", v);
X	vm = v & 0xFFFF; /* dividend is in low word */
X	v &= 0xFFFF0000; /* remainder is in high word */
X	v += ul;
X	DIV(vl, v);
X	v &= 0xFFFF; /* dividend is in low word */
X	u = vm;
X	SWAP(u);
X	return(v + u);
X	/*ans = ((um / vl) << 16) + ((((um % vl) << 16) + ul) / vl);
X	  return(ans);*/
X    }
X    
X    if (vl == 0) return(um / vm);
X    SWAP(u); SWAP(v);
X    if ( (u >> 3) < v) {
X	for(i = 0; u >= v; i++, u -= v);
X	/*printf("lt 8\n");*/
X	return(i);
X    }
X    u1 = u; v1 = v;
X    
X    /* scale divisor */
X    v1--;
X    for(i = 0; ((unsigned) v1) >= 0xFFFF; v1 >>= 1, i++);
X    if (++v1 > 0xFFFF) {
X	i++; v1 >>=1;
X    }
X    u1 >>= i;
X    /*printf("i is %d, u1 is %x, v1 is %x\n", i, u1, v1);*/
X    ans = u1 / v1;
X    rem = u - (ans * v);
X    if (rem > v) {ans++; rem -= v; }
X    if (rem > v) {printf("oops\n");}
X    return(ans);
X}
X#endif
X
X#ifdef L_divsi3
X
Xlong divl(a, b)
Xlong a, b;
X{
X    int sign = 0;
X    if ((a ^ b) < 0) sign = 1;
X    if (a < 0) a = -a;
X    if (b < 0) b = -b;
X    if (sign) return(-udivl(a,b));
X    else return(udivl(a,b));
X}
X#endif
X
X#ifdef L_umodsi3
X#ifdef __GCC_OLD__
X_umodsi3 (a, b)
X#else
X__umodsi3 (a, b)
X#endif
Xunsigned a, b;
X{
X    /*return a % b;*/
X    return (a - ((a/b)*b));
X}
X#endif
X
X#ifdef L_modsi3
X#ifdef __GCC_OLD__
X_modsi3 (a, b)
X#else
X__modsi3 (a, b)
X#endif
Xint a, b;
X{
X    /*return a % b;*/
X    return( a - ((a/b) * b));
X}
X#endif
X
X#ifdef L_lshrsi3
X#ifdef __GCC_OLD__
X_lshrsi3 (a, b)
X#else
X__lshrsi3 (a, b)
X#endif
Xunsigned a, b;
X{
X    return a >> b;
X}
X#endif
X
X#ifdef L_lshlsi3
X#ifdef __GCC_OLD__
X_lshlsi3 (a, b)
X#else
X__lshlsi3 (a, b)
X#endif
Xunsigned a, b;
X{
X    return a << b;
X}
X#endif
X
X#ifdef L_ashrsi3
X#ifdef __GCC_OLD__
X_ashrsi3 (a, b)
X#else
X__ashrsi3 (a, b)
X#endif
Xint a, b;
X{
X    return a >> b;
X}
X#endif
X
X#ifdef L_ashlsi3
X#ifdef __GCC_OLD__
X_ashlsi3 (a, b)
X#else
X__ashlsi3 (a, b)
X#endif
Xint a, b;
X{
X    return a << b;
X}
X#endif
X
X
X/* this divide stuff is hopelessly broken, in addition to being much
X   more complicated than in needs to be.  The new version's at the
X   end of this file */
X
X#if 0
X#ifdef L_divdf3
X
X/*double _divdf3 (a, b) double a, b; { return a / b; } */
X
Xdouble drecip1(f1)
Xdouble f1;
X{
X    struct bitdouble *bdp = &f1;
X    unsigned m1, m2;
X    
X    printf("drecip1(%X)", f1);
X    if (bdp->exp == 0 ) return(0L);
X    if (bdp->mant1 == 0L) {
X	bdp->exp = 0x3ff + 0x3ff - bdp->exp;
X	bdp->mant2 = 0L;
X	return(f1);
X    }
X    bdp->exp = 0x3ff + 0x3ff - bdp->exp - 1;
X    m1 = (0x00100000 + bdp->mant1) >> 5;
X    m2 = (0x80000000 / m1);
X    /*printf("m1 %x m2 %x\n", m1, m2);*/
X    m2 <<= 5;
X    m2 &= 0xFFFFF;
X    /*printf("exp %x mant %x\n", bdp->exp, m2);*/
X    bdp->mant1 = m2;
X    bdp->mant2 = 0L;
X    printf("drecip1->%X\n", f1);
X    return(f1);
X}
X
Xdouble drecip(f)
Xdouble f;
X{
X    struct bitdouble *bdp;
X    double quotient, remainder;
X    
X    printf("drecip(%X)", f);
X    quotient = drecip1(f);
X    remainder = /* 1.0 */ ((double)one) - quotient * f;
X    bdp = &remainder;
X    for(; bdp->exp > 0x3ca; ) {
X	printf("drc: exp=%X ", bdp->exp);
X	remainder = /* 1.0 */ ((double)one) - (quotient*f);
X	printf("rem=%X ", remainder);
X	quotient = quotient + (drecip1(f)*remainder);
X    }
X    printf("drecip->%X\n", quotient);
X    return(quotient);
X}
X
X
Xdouble ddiv(f1, f2)
Xdouble f1, f2;
X{
X    return(f1 * drecip(f2));
X}
X#endif
X#endif			/* commented out divide routines */
X
X#ifdef L_muldf3
X/*double _muldf3 (a, b) double a, b; { return a * b; } */
X
X#ifdef SLOW_KLUDGEY_QMULT
X__qmult(m11, m12, m21, m22, p1, p2)
Xunsigned long m11, m12, m21, m22, *p1, *p2;
X{
X/*    register unsigned long d2 = m11; */
X    register long d2 = m11;
X    register unsigned long d3 = m12, d4 = m21, d5 = m22, d6 =0, d7 = 0;
X    int i;
X    /* guess what happens if you delete the next line.... */
X    /*	&i; */
X    for (i = 0; i < 11; i++) ASL2(d2, d3);
X    for (i = 0; i < 9; i++) ASL2(d4, d5);
X    
X    for (i = 0; i < 64; i++) {
X	if (d2 < 0) { ADD2(d4, d5, d6, d7);}
X	ASL2(d2, d3);
X	ASR2(d4, d5);
X    }
X    d2 = d6;
X    d3 = d7;
X    for (i = 0; i < 9; i++) ASR2(d2, d3);
X    *p1 = d2; *p2 = d3;
X}
X
X#else
X/* new qmult */
X
X/* a set of totally local kludges, for summing partial results into
X   result vector */
X/*
X#define XADDL(partial, target_ptr) \
X	{ register unsigned long temp = *target_ptr; \
X	asm volatile("addl %2,%0" : "=d" (temp) : "0" (temp), "g" (partial)); \
X	*target_ptr-- = temp; temp = *target_ptr; \
X	asm volatile("addxl #0,%0" : "=d" (temp) : "0" (temp)); \
X	*target_ptr = temp; }
X*/
X
Xstatic unsigned long constant_zero_kludge = 0;
X#define XXADDL(partial, target) \
X	{ register unsigned long * zero = &constant_zero_kludge + 1; \
X	  asm volatile("addl  %3,%0@;\
X			addxl %1 at -,%0 at -" \
X			: "=a" (target), "=a" (zero) \
X			: "0" (target), "g" (partial), "1" (zero)); \
X	}
X
X/*
X#define ADDL(partial, target_ptr) \
X	{ register unsigned long temp = *target_ptr; \
X	asm volatile("addl %2,%0" : "=d" (temp) : "0" (temp), "g" (partial)); \
X	*target_ptr-- = temp }
X
X#define ADDXL(partial, target_ptr) \
X	{ register unsigned long temp = *target_ptr; \
X	asm volatile("addxl %2,%0" : "=d" (temp) : "0" (temp), "g" (partial)); \
X	*target_ptr-- = temp }
X	
X#define ADDW(partial, target_ptr) \
X	{ register unsigned short temp = *(unsigned short * )target_ptr; \
X	asm volatile("addw %2,%0" : "=d" (temp) : "0" (temp), "g" (partial)); \
X	*(unsigned short * )target_ptr-- = temp }
X
X#define ADDXW(partial, target_ptr) \
X	{ register unsigned sort temp = *(unsigned short * )target_ptr; \
X	asm volatile("addxw %2,%0" : "=d" (temp) : "0" (temp), "g" (partial)); \
X	*(unsigned short * )target_ptr-- = temp }
X*/	
X
Xstatic char component_index[] = 
X	{
X	3, 3,				/* last ones */
X
X	2, 3,				/* next to last x last */
X	3, 2,				/* ... */
X
X	1, 3,
X	2, 2,
X	3, 1,
X
X	0, 3,
X	1, 2,
X	2, 1,
X	3, 0,
X	
X	0, 2,
X	1, 1,
X	2, 0,
X	
X	0, 1,
X	1, 0,
X	
X	0, 0
X	};
X
Xqmult(m1_hi, m1_lo, m2_hi, m2_lo, result_hi, result_lo)
Xunsigned long m1_hi, m1_lo, m2_hi, m2_lo, * result_hi, * result_lo;
X{
X  unsigned short * m1 = (unsigned short * )(&m1_hi);
X  unsigned short * m2 = (unsigned short * )(&m2_hi);
X  unsigned short result[10];		/* two extra for XADDL */
X  register unsigned long mult_register;
X  register unsigned long res1, res2, res3;
X  long * kludge;
X  short i, j;
X  char * idx_p = (char * )&component_index;
X/*
Xfprintf(stderr, "  qmult: %08X:%08X * %08X:%08X\n", 
X	m1_hi, m1_lo, m2_hi, m2_lo);
X*/
X  for (i = 0 ; i < 10 ; i++) result[i] = 0;
X
X/* walk thru 'vectors' of mantissa pieces, doing unsigned multiplies
X   and summing results into result vector.  Note that the order is 
X   chosen to guarantee that no more adds than generated by XADDL are
X   needed.  */
X
X  for ( ; ; )
X	{
X	i = *idx_p++;
X	j = *idx_p++;
X	mult_register = m1[i]; 
X	MUL(m2[j], mult_register);
X	kludge = (long * )(&result[i + j + 2]);
X	XXADDL(mult_register, kludge);
X/*
Xfprintf(stderr, "  qmult: %d[%04X] * %d[%04X] -> %08X\n",
X  i, m1[i], j, m2[j], mult_register);
Xfprintf(stderr, "       %04X %04X %04X %04X %04X %04X %04X %04X\n",
X  result[2], result[3], result[4], result[5], 
X  result[6], result[7], result[8], result[9]);
X*/
X	if ((i == 0) && (j == 0))
X		break;
X	}
X
X  /* get the result shifted to the right place.  Unfortunately, we need
X     the 53 bits that's 22 bits down in the result vec.  sigh */
X  kludge = (long * )(&result[2]);
X  res1 = *kludge++;
X  res2 = *kludge++;
X  res3 = *kludge;
X  for (i = 0 ; i < 12 ; i++)
X	ASL3(res1, res2, res3);
X  /* now put the result back */
X  *result_hi = res1;
X  *result_lo = res2;
X}
X#endif
X
X
Xdouble dmult(f1, f2)
Xdouble f1, f2;
X{
X    register long d2;
X    register unsigned d3, d4, d5, d6, d7;
X    unsigned long p1, p2;
X    
X    struct bitdouble
X	*bdp1 = (struct bitdouble *)&f1,
X	*bdp2 = (struct bitdouble *)&f2;
X    int exp1, exp2, i;
X    
X    exp1 = bdp1->exp; exp2 = bdp2->exp;
X    /* check for zero */
X    if (! exp1) return(0.0);
X    if (! exp2) return(0.0);
X    d2 = 0x00100000 + bdp1->mant1;
X    d3 = bdp1->mant2;
X    d4 = 0x00100000 + bdp2->mant1;
X    d5 = bdp2->mant2;
X    __qmult(d2, d3, d4, d5, &p1, &p2);
X    d6 = p1; d7 = p2;
X    
X    if (d6 & 0x00200000) {
X	ASR2(d6, d7);
X	exp1++;
X    }
X    
X    if (bdp1->sign == bdp2->sign) bdp1->sign = 0;
X    else bdp1->sign = 1;
X    bdp1->exp = exp1 + exp2 - 0x3ff;
X    bdp1->mant1 = d6;
X    bdp1->mant2 = d7;
X    return(f1);
X}
X#endif
X
X#ifdef L_negdf2
X/*double _negdf2 (a) double a; { return -a; } */
X
Xdouble dneg(num)
Xdouble num;
X{
X    long *i = (long *)#
X	if (*i)			/* only negate if non-zero */
X	    *i ^= 0x80000000;
X    return(num);
X}
X#endif
X
X#ifdef L_adddf3
X/*double _adddf3 (a, b) double a, b; { return a + b; } */
X
Xdouble dadd(f1, f2)
Xdouble f1, f2;
X{
X    
X    register long d4, d5, d6, d7;
X    struct bitdouble
X	*bdp1 = (struct bitdouble *)&f1,
X    	*bdp2 = (struct bitdouble *)&f2;
X    short exp1, exp2, sign1, sign2, howmuch, temp;
X    
X    exp1 = bdp1->exp; exp2 = bdp2->exp;
X    
X    /* check for zero */
X    if (! exp1) return(f2); if (! exp2) return(f1);
X    
X    /* align them */
X    if (exp1 < exp2) {
X	bdp1 = (struct bitdouble *)&f2; bdp2 = (struct bitdouble *)&f1;
X	exp1 = bdp1->exp; exp2 = bdp2->exp;
X    }
X    howmuch = exp1 - exp2;
X    if (howmuch > 53) return(f1);
X    
X    d7 = bdp2->mant1 + 0x00100000;
X    d6 = bdp2->mant2;
X    
X    d5 = bdp1->mant1 + 0x00100000;
X    d4 = bdp1->mant2;
X    
X    for (temp = 0; temp < howmuch; temp++) ASR2(d7, d6);
X    
X    /* take care of negative signs */
X    if (bdp1->sign) 
X    {
X	NEG(d5, d4);
X    }
X    if (bdp2->sign)
X    {
X	NEG(d7, d6);
X    }
X    
X    ADD2(d7, d6, d5, d4);
X    bdp1 = (struct bitdouble *)&f1;
X    
X    if (d5 < 0) {
X	NEG(d5, d4);
X	bdp1->sign = 1;
X    } else bdp1->sign = 0;
X    
X    if (d5 == 0 && d4 == 0) return(0.0);
X    
X    for(howmuch = 0; d5 >= 0; howmuch++) ASL2(d5, d4);
X    
X    ASL2(d5, d4);
X    for (temp = 0; temp < 12; temp++) ASR2(d5, d4);
X    bdp1->mant1 = d5;
X    bdp1->mant2 = d4;
X    bdp1->exp = exp1 + 11 - howmuch;
X    return(f1); 
X}
X
X#endif
X
X#ifdef L_subdf3
Xdouble
X#ifdef __GCC_OLD__
X    _subdf3 (a, b)
X#else
X    __subdf3 (a, b)
X#endif
Xdouble a, b;
X{
X    return a+(-b);
X}
X#endif
X
X#ifdef L_cmpdf2
X/*
X  int _cmpdf2 (a, b) double a, b; { if (a > b) return 1;
X  else if (a < b) return -1; return 0; } 
X  */
X
Xint dcmp(f1, f2)
Xdouble f1, f2;
X{
X    struct bitdouble *bdp1, *bdp2;
X    unsigned int s1, s2;
X    bdp1 = (struct bitdouble *)&f1;
X    bdp2 = (struct bitdouble *)&f2;
X    s1 = bdp1->sign;
X    s2 = bdp2->sign;
X    if (s1 > s2) return(-1);
X    if (s1 < s2) return(1);
X    /* if sign of both negative, switch them */
X    if (s1 != 0) {
X	bdp1 = (struct bitdouble *)&f2;
X	bdp2 = (struct bitdouble *)&f1;
X    }
X    s1 = bdp1->exp;
X    s2 = bdp2->exp;
X    if (s1 > s2) return(1);
X    if (s1 < s2) return(-1);
X    /* same exponent -- have to compare mantissa */
X    s1 = bdp1->mant1;
X    s2 = bdp2->mant1;
X    if (s1 > s2) return(1);
X    if (s1 < s2) return(-1);
X    s1 = bdp1->mant2;
X    s2 = bdp2->mant2;
X    if (s1 > s2) return(1);
X    if (s1 < s2) return(-1);
X    return(0); /* the same! */
X}
X#endif
X
X#ifdef L_fixunsdfsi
X/*_fixunsdfsi (a) double a; { return (unsigned int) a; } */
X
X/* #ifdef L_fixdfsi _fixdfsi (a) double a; { return (int) a; } #endif */
X
Xunsigned long dtoul(f)
Xdouble f;
X{
X    struct bitdouble *bdp;
X    int si, ex, mant1, mant2;
X    bdp = (struct bitdouble *)&f;
X    si = bdp->sign;
X    ex = bdp->exp;
X    mant1 = bdp->mant1 + 0x00100000;
X    mant2 = bdp->mant2;
X    
X    /* zero value */
X    if (ex == 0) return(0L);
X    /* less than 1 */
X    if (ex < 0x3ff) return(0L);
X    /* less than 0 */
X    if (si ) return(0L);
X    mant1 <<= 10;
X    mant1 += (mant2 >> 22);
X    mant1 >>= 30 + (0x3ff - ex);
X    return(mant1);
X}
X
Xlong dtol(f)
Xdouble f;
X{
X    struct bitdouble *bdp = (struct bitdouble *)&f;
X    
X    if (bdp->sign) {
X	bdp->sign = 0;
X	return( - dtoul(f));
X    }
X    return(dtoul(f));
X}
X#endif
X
X#ifdef L_fixunsdfdi
X
X/*double _fixunsdfdi (a) double a; { union double_di u;
X  u.i[LOW] = (unsigned int) a; u.i[HIGH] = 0; return u.d; } */
X#endif
X
X
X#ifdef L_fixdfdi
Xdouble
X#ifdef __GCC_OLD__
X    _fixdfdi (a)
X#else
X    __fixdfdi (a)
X#endif
Xdouble a;
X{
X    union double_di u;
X    u.i[LOW] = (int) a;
X    u.i[HIGH] = (int) a < 0 ? -1 : 0;
X    return u.d;
X}
X#endif
X
X#ifdef L_floatsidf
X/* double _floatsidf (a) int a; { return (double) a; } */
X
Xdouble ltod(i)
Xlong i;
X{
X    int expo, shift;
X    double retval;
X    struct bitdouble *bdp = (struct bitdouble *)&retval;
X    if (i == 0) {
X	long *t = (long *)&retval;
X	t[0] = 0L;
X	t[1] = 0L;
X	return(retval);
X    }
X    if (i < 0) {
X	bdp->sign = 1;
X	i = -i;
X    } else bdp->sign = 0;
X    shift = i;
X    for (expo = 0x3ff + 31 ; shift > 0; expo--, shift <<= 1);
X    shift <<= 1;
X    bdp->exp = expo;
X    bdp->mant1 = shift >> 12;
X    bdp->mant2 = shift << 20;
X    return(retval);
X}
X
X#endif
X
X#ifdef L_floatdidf
X/* ok as is -- will call other routines */
Xdouble
X#ifdef __GCC_OLD__
X    _floatdidf (u)
X#else
X    __floatdidf (u)
X#endif
Xunion double_di u;
X{
X    register double hi
X	= ((double) u.i[HIGH]) * (double) 0x10000 * (double) 0x10000;
X    register double low = (unsigned int) u.i[LOW];
X    return hi + low;
X}
X#endif
X
X#ifdef L_addsf3
XSFVALUE
X    _addsf3 (a, b)
Xunion flt_or_int a, b;
X{
X    union flt_or_int intify; return INTIFY ((double) a.f + (double) b.f);
X}
X#endif
X
X#ifdef L_negsf2
XSFVALUE
X    _negsf2 (a)
Xunion flt_or_int a;
X{
X    union flt_or_int intify;
X    return INTIFY (-((double) a.f));
X}
X#endif
X
X#ifdef L_subsf3
XSFVALUE
X    _subsf3 (a, b)
Xunion flt_or_int a, b;
X{
X    union flt_or_int intify;
X    return INTIFY (((double) a.f - (double) b.f));
X}
X#endif
X
X#ifdef L_cmpsf2
XSFVALUE
X    _cmpsf2 (a, b)
Xunion flt_or_int a, b;
X{
X    union flt_or_int intify;
X    double a1, b1;
X    a1 = a.f; b1 = b.f;
X    if ( a1 > b1)
X	return 1;
X    else if (a1 < b1)
X	return -1;
X    return 0;
X}
X#endif
X
X#ifdef L_mulsf3
XSFVALUE
X    _mulsf3 (a, b)
Xunion flt_or_int a, b;
X{
X    union flt_or_int intify;
X    return INTIFY (((double) a.f * (double) b.f));
X}
X#endif
X
X#ifdef L_divsf3
XSFVALUE
X    _divsf3 (a, b)
Xunion flt_or_int a, b;
X{
X    union flt_or_int intify;
X    return INTIFY (((double) a.f / (double) b.f));
X}
X#endif
X
X#ifdef L_truncdfsf2
Xfloat __dtof(d)
Xdouble d;
X{
X    struct bitdouble *bdp = (struct bitdouble *)&d;
X    float retval;
X    struct bitfloat *bfp = (struct bitfloat *)&retval;
X    int tempval;
X    
X    bfp->sign = bdp->sign;
X    if (bdp->exp == 0) return ((float) 0.0);
X    bfp->exp = bdp->exp - 0x400 + 0x80;
X    tempval = (bdp->mant1 << 4 ) + ((0xF0000000 & bdp->mant2) >> 28);
X    /* round */
X    tempval++;
X    if (tempval == 0x01000000) bfp->exp++;
X    bfp->mant = tempval >> 1;
X    return(retval);
X}
X
XSFVALUE
X#ifdef __GCC_OLD__
X    _truncdfsf2 (a)
X#else
X    __truncdfsf2 (a)
X#endif
Xdouble a;
X{
X    union flt_or_int intify;
X    return INTIFY (__dtof(a));
X}
X#endif
X
X#ifdef L_extendsfdf2
Xdouble __ftod(f)
Xunion flt_or_int f;
X{
X    double retval;
X    struct bitfloat *bfp = (struct bitfloat *)&f.f;
X    struct bitdouble *bdp = (struct bitdouble *)&retval;
X    if (bfp->exp == 0) return(0.0);
X    bdp->sign = bfp->sign;
X    bdp->exp = 0x400 - 0x80 + bfp->exp;
X    bdp->mant1 = bfp->mant >> 3;
X    bdp->mant2 = (bfp->mant & 0x7) << 29;
X    /*printf("returning %f from extendsfdf2\n", retval);*/
X    return(retval);
X}
X
Xdouble
X#ifdef __GCC_OLD__
X    _extendsfdf2 (a)
X#else
X    __extendsfdf2 (a)
X#endif
Xunion flt_or_int a;
X{
X    union flt_or_int intify;
X    double retval;
X    return (__ftod(a));
X}
X#endif
X
X#ifdef L_divdf3
X
X/* new double-float divide routine, by jrd */
X/* thanks jrd !! */
X
X#ifdef __GCC_OLD__
Xdouble _divdf3(num, denom)
X#else
Xdouble __divdf3(num, denom)
X#endif
Xdouble num, denom;
X{
X    double local_num = num;
X    double local_denom = denom;
X    struct bitdouble * num_bdp = (struct bitdouble *)(&local_num);
X    struct bitdouble * den_bdp = (struct bitdouble *)(&local_denom);
X    short num_exp = num_bdp->exp,
X          den_exp = den_bdp->exp;
X    short result_sign = 0;
X    /*  register */ unsigned long num_m1, num_m2, num_m3, num_m4;
X    register unsigned long den_m1, den_m2, den_m3, den_m4;
X    unsigned long result_mant[3];
X    unsigned long result_mask;
X    short result_idx;
X    
X    if ((num_exp == 0) || (den_exp == 0))		/* zzz should really cause trap */
X  	return(0.0);
X    
X    /* deal with signs */
X    result_sign = result_sign + num_bdp->sign - den_bdp->sign;
X    
X    /* unpack the numbers */
X    num_m1 = num_bdp->mant1 | 0x00100000;		/* hidden bit */
X    num_m2 = num_bdp->mant2;
X    num_m4 = num_m3 = 0;
X    den_m1 = den_bdp->mant1 | 0x00100000;		/* hidden bit */
X    den_m2 = den_bdp->mant2;
X    den_m4 = den_m3 = 0;
X    
X#if 0
X    /* buy a little extra accuracy by shifting num and denum up 10 bits */
X    for (result_idx /* loop counter */ = 0 ; result_idx < 10 ; result_idx++)
X    {
X	ASL3(num_m1, num_m2, num_m3);
X	ASL3(den_m1, den_m2, den_m3);
X    }
X#endif
X    
X    /* hot wire result mask and index */
X    result_mask = 0x00100000;			/* start at top mantissa bit */
X    result_idx = 0;				/* of first word */
X    result_mant[0] = 0;
X    result_mant[1] = 0;
X    result_mant[2] = 0;
X    
X    /* if denom is greater than num here, shift denom right one and dec num expt */
X    if (den_m1 < num_m1)
X  	goto kludge1;				/* C is assembly language,
X						   remember? */
X    if (den_m1 > num_m1)
X  	goto kludge0;
X    if (den_m2 <= num_m2)				/* first word eq, try 2nd */
X  	goto kludge1;
X    
X  kludge0:
X    
X    num_exp--;
X    ASR4(den_m1, den_m2, den_m3, den_m4);
X    
X  kludge1:
X    
X    for ( ; !((result_idx == 2) && (result_mask & 0x40000000)) ; )
X    {
X	/* if num >= den, subtract den from num and set bit in result */
X	if (num_m1 > den_m1) goto kludge2;
X	if (num_m1 < den_m1) goto kludge3;
X	if (num_m2 > den_m2) goto kludge2;
X	if (num_m2 < den_m2) goto kludge3;
X	if (num_m3 > den_m3) goto kludge2;
X	if (num_m3 < den_m3) goto kludge3;
X	if (num_m4 < den_m4) goto kludge3;
X	
X      kludge2:
X	result_mant[result_idx] |= result_mask;
X	SUB4(den_m1, den_m2, den_m3, den_m4, num_m1, num_m2, num_m3, num_m4);
X      kludge3:
X	ASR4(den_m1, den_m2, den_m3, den_m4);
X	result_mask >>= 1;
X	if (result_mask == 0)		/* next word? */
X	{
X	    result_mask = 0x80000000;
X	    result_idx++;
X	}
X    }
X
X/* stick in last half-bit if necessary */
X  if (result_mant[2])
X	{
X	den_m1 = 0;		/* handy register */
X	den_m2 = 1;
X/* busted
X	ADD2(den_m1, den_m2, result_mant[0], result_mant[1]);
X*/
X	result_mant[1]++;
X	if (result_mant[1] == 0)
X		result_mant[0]++;
X
X	if (result_mant[0] & 0x00200000)	/* overflow? */
X		{
X		num_exp--;
X		}
X	}
X  /* compute the resultant exponent */
X  num_exp = num_exp - den_exp + 0x3FF;
X
X  /* reconstruct the result in local_num */
X  num_bdp->sign = result_sign;
X  num_bdp->exp = num_exp;
X  num_bdp->mant1 = result_mant[0] & 0xFFFFF;
X  num_bdp->mant2 = result_mant[1];
X  
X  /* done! */
X  return(local_num);
X}
X#endif
SHAR_EOF
true || echo 'restore of dflonum.c failed'
fi
# ============= dflonum.s ==============
if test -f 'dflonum.s' -a X"$1" != X"-c"; then
	echo 'x - skipping dflonum.s (File already exists)'
else
echo 'x - extracting dflonum.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'dflonum.s' &&
X#NO_APP
Xgcc_compiled.:
X.data
X	.even
X_constant_zero_kludge:
X	.long 0
X_component_index:
X	.byte 3
X	.byte 3
X	.byte 2
X	.byte 3
X	.byte 3
X	.byte 2
X	.byte 1
X	.byte 3
X	.byte 2
X	.byte 2
X	.byte 3
X	.byte 1
X	.byte 0
X	.byte 3
X	.byte 1
X	.byte 2
X	.byte 2
X	.byte 1
X	.byte 3
X	.byte 0
X	.byte 0
X	.byte 2
X	.byte 1
X	.byte 1
X	.byte 2
X	.byte 0
X	.byte 0
X	.byte 1
X	.byte 1
X	.byte 0
X	.byte 0
X	.byte 0
X.text
X	.even
X.globl ___qmult
X___qmult:
X	link a6,#-40
X	moveml #0x3e20,sp at -
X	movel a6,d6
X	addql #8,d6
X	movel d6,a6@(-4)
X	moveq #16,d6
X	addl a6,d6
X	movel d6,a6@(-8)
X	movel #_component_index,a6@(-40)
X	clrw a6@(-34)
XL2:
X	cmpw #9,a6@(-34)
X	jgt L3
X	movew a6@(-34),d4
X	extl d4
X	asll #1,d4
X	addl a6,d4
X	moveq #-28,d6
X	addl d4,d6
X	movel d6,a0
X	clrw a0@
XL4:
X	addqw #1,a6@(-34)
X	jra L2
XL3:
X	nop
XL5:
X	movel a6@(-40),a0
X	moveb a0@,d6
X	extw d6
X	movew d6,a6@(-34)
X	addql #1,a6@(-40)
X	movel a6@(-40),a0
X	moveb a0@,d6
X	extw d6
X	movew d6,a6@(-36)
X	addql #1,a6@(-40)
X	movew a6@(-34),d4
X	extl d4
X	asll #1,d4
X	movel a6@(-4),a0
X	moveq #0,d0
X	movew a0@(d4:l),d0
X	movew a6@(-36),d4
X	extl d4
X	asll #1,d4
X	movel a6@(-8),a0
X#APP
X	mulu a0@(d4:l),d0
X#NO_APP
X	moveq #-28,d4
X	addl a6,d4
X	movew a6@(-34),a0
X	movew a6@(-36),a1
X	movel a0,d5
X	addl a1,d5
X	asll #1,d5
X	addql #4,d5
X	movel d4,d6
X	addl d5,d6
X	movel d6,a6@(-32)
X	lea _constant_zero_kludge,a0
X	addqw #4,a0
X	movel d6,a2
X#APP
X	addl  d0,a2@;			addxl a0 at -,a2 at -
X#NO_APP
X	movel a2,a6@(-32)
X	tstw a6@(-34)
X	jne L8
X	tstw a6@(-36)
X	jne L8
X	jra L6
XL8:
XL7:
X	jra L5
XL6:
X	moveq #-28,d4
X	addl a6,d4
X	movel d4,d6
X	addql #4,d6
X	movel d6,a6@(-32)
X	movel a6@(-32),a0
X	movel a0@,d1
X	addql #4,a6@(-32)
X	movel a6@(-32),a0
X	movel a0@,d2
X	addql #4,a6@(-32)
X	movel a6@(-32),a0
X	movel a0@,d3
X	clrw a6@(-34)
XL9:
X	cmpw #11,a6@(-34)
X	jgt L10
X#APP
X	asll #1,d3
X	roxll #1,d2
X	roxll #1,d1
X#NO_APP
XL11:
X	addqw #1,a6@(-34)
X	jra L9
XL10:
X	movel a6@(24),a0
X	movel d1,a0@
X	movel a6@(28),a0
X	movel d2,a0@
XL1:
X	moveml a6@(-64),#0x47c
X	unlk a6
X	rts
X	.even
XLC0:
X	.double 0r0.00000000000000000000e+00
X	.even
X.globl ___muldf3
X___muldf3:
X	link a6,#-32
X	moveml #0x3f00,sp at -
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-12)
X	moveq #16,d2
X	addl a6,d2
X	movel d2,a6@(-16)
X	movel a6@(-12),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d2
X	andl #2047,d2
X	movel d2,a6@(-20)
X	movel a6@(-16),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d2
X	andl #2047,d2
X	movel d2,a6@(-24)
X	tstl a6@(-20)
X	jne L13
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L12
XL13:
X	tstl a6@(-24)
X	jne L14
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L12
XL14:
X	movel a6@(-12),a0
X	movel a0@,d0
X	andl #1048575,d0
X	movel d0,d2
X	addl #1048576,d2
X	movel d2,a6@(-32)
X	movel a6@(-12),a0
X	movel a0@(4),d3
X	movel a6@(-16),a0
X	movel a0@,d0
X	andl #1048575,d0
X	movel d0,d4
X	addl #1048576,d4
X	movel a6@(-16),a0
X	movel a0@(4),d5
X	movel a6,d0
X	subql #8,d0
X	movel d0,sp at -
X	movel a6,d0
X	subql #4,d0
X	movel d0,sp at -
X	movel d5,sp at -
X	movel d4,sp at -
X	movel d3,sp at -
X	movel a6@(-32),sp at -
X	jbsr ___qmult
X	movel a6@(-4),d6
X	movel a6@(-8),d7
X	movel d6,d0
X	andl #2097152,d0
X	addw #24,sp
X	tstl d0
X	jeq L15
X#APP
X	asrl #1,d6
X	roxrl #1,d7
X#NO_APP
X	addql #1,a6@(-20)
XL15:
X	movel a6@(-12),a0
X	moveb a0@,d1
X	lsrb #7,d1
X	moveq #0,d0
X	moveb d1,d0
X	movel a6@(-16),a0
X	moveb a0@,d1
X	lsrb #7,d1
X	subl a0,a0
X	movel a0,d2
X	moveb d1,d2
X	movel d2,a0
X	cmpl d0,a0
X	jne L16
X	movel a6@(-12),a0
X	andb #127,a0@
X	jra L17
XL16:
X	movel a6@(-12),a0
X	orb #128,a0@
XL17:
X	movel a6@(-12),a0
X	movel a6@(-20),d0
X	addl a6@(-24),d0
X	addl #-1023,d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(-12),a0
X	movel d6,d0
X	andl #1048575,d0
X	andl #-1048576,a0@
X	orl d0,a0@
X	movel a6@(-12),a0
X	movel d7,a0@(4)
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L12
XL12:
X	moveml a6@(-56),#0xfc
X	unlk a6
X	rts
X	.even
X.globl ___negdf2
X___negdf2:
X	link a6,#-4
X	movel a6,a1
X	addqw #8,a1
X	movel a1,a6@(-4)
X	movel a6@(-4),a0
X	tstl a0@
X	jeq L19
X	movel a6@(-4),d0
X	movel d0,a0
X	eorl #-2147483648,a0@
XL19:
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L18
XL18:
X	unlk a6
X	rts
X	.even
X.globl ___adddf3
X___adddf3:
X	link a6,#-20
X	moveml #0x3e20,sp at -
X	movel a6,d6
X	addql #8,d6
X	movel d6,a6@(-4)
X	moveq #16,d6
X	addl a6,d6
X	movel d6,a6@(-8)
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	andw #2047,d0
X	movew d0,a6@(-10)
X	movel a6@(-8),a0
X	movew a0@,d0
X	lsrw #4,d0
X	andw #2047,d0
X	movew d0,a6@(-12)
X	tstw a6@(-10)
X	jne L21
X	movel a6@(16),d0
X	movel a6@(20),d1
X	jra L20
XL21:
X	tstw a6@(-12)
X	jne L22
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L20
XL22:
X	movew a6@(-12),d6
X	cmpw a6@(-10),d6
X	jle L23
X	moveq #16,d6
X	addl a6,d6
X	movel d6,a6@(-4)
X	movel a6,d6
X	addql #8,d6
X	movel d6,a6@(-8)
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	andw #2047,d0
X	movew d0,a6@(-10)
X	movel a6@(-8),a0
X	movew a0@,d0
X	lsrw #4,d0
X	andw #2047,d0
X	movew d0,a6@(-12)
XL23:
X	movew a6@(-10),d6
X	subw a6@(-12),d6
X	movew d6,a6@(-18)
X	cmpw #53,a6@(-18)
X	jle L24
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L20
XL24:
X	movel a6@(-8),a0
X	movel a0@,d5
X	andl #1048575,d5
X	addl #1048576,d5
X	movel a6@(-8),a0
X	movel a0@(4),d4
X	movel a6@(-4),a0
X	movel a0@,d3
X	andl #1048575,d3
X	addl #1048576,d3
X	movel a6@(-4),a0
X	movel a0@(4),d2
X	clrw a6@(-20)
XL25:
X	movew a6@(-18),d6
X	cmpw a6@(-20),d6
X	jle L26
X#APP
X	asrl #1,d5
X	roxrl #1,d4
X#NO_APP
XL27:
X	addqw #1,a6@(-20)
X	jra L25
XL26:
X	movel a6@(-4),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	tstl d1
X	jeq L28
X#APP
X	negl d2
X	negxl d3
X#NO_APP
XL28:
X	movel a6@(-8),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	tstl d1
X	jeq L29
X#APP
X	negl d4
X	negxl d5
X#NO_APP
XL29:
X#APP
X	addl d4,d2
X	addxl d5,d3
X#NO_APP
X	movel a6,d6
X	addql #8,d6
X	movel d6,a6@(-4)
X	tstl d3
X	jge L30
X#APP
X	negl d2
X	negxl d3
X#NO_APP
X	movel a6@(-4),a0
X	orb #128,a0@
X	jra L31
XL30:
X	movel a6@(-4),a0
X	andb #127,a0@
XL31:
X	tstl d3
X	jne L32
X	tstl d2
X	jne L32
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L20
XL32:
X	nop
X	clrw a6@(-18)
XL33:
X	tstl d3
X	jlt L34
X#APP
X	asll #1,d2
X	roxll #1,d3
X#NO_APP
XL35:
X	addqw #1,a6@(-18)
X	jra L33
XL34:
X#APP
X	asll #1,d2
X	roxll #1,d3
X#NO_APP
X	clrw a6@(-20)
XL36:
X	cmpw #11,a6@(-20)
X	jgt L37
X#APP
X	asrl #1,d3
X	roxrl #1,d2
X#NO_APP
XL38:
X	addqw #1,a6@(-20)
X	jra L36
XL37:
X	movel a6@(-4),a0
X	movel d3,d0
X	andl #1048575,d0
X	andl #-1048576,a0@
X	orl d0,a0@
X	movel a6@(-4),a0
X	movel d2,a0@(4)
X	movel a6@(-4),a0
X	movew a6@(-10),a1
X	movew a6@(-18),a2
X	moveq #-11,d0
X	addl a2,d0
X	subl a1,d0
X	negl d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L20
XL20:
X	moveml a6@(-44),#0x47c
X	unlk a6
X	rts
X	.even
X.globl ___subdf3
X___subdf3:
X	link a6,#0
X	movel d2,sp at -
X	movel #___negdf2,d0
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	jbsr ___negdf2
X	addqw #8,sp
X	movel d0,d0
X	movel d1,d1
X	movel #___adddf3,d2
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	movel d1,sp at -
X	movel d0,sp at -
X	jbsr ___adddf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L39
XL39:
X	movel a6@(-4),d2
X	unlk a6
X	rts
X	.even
X.globl ___cmpdf2
X___cmpdf2:
X	link a6,#-16
X	movel d2,sp at -
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-4)
X	moveq #16,d2
X	addl a6,d2
X	movel d2,a6@(-8)
X	movel a6@(-4),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	movel d1,a6@(-12)
X	movel a6@(-8),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	movel d1,a6@(-16)
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jls L41
X	moveq #-1,d0
X	jra L40
XL41:
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jcc L42
X	moveq #1,d0
X	jra L40
XL42:
X	tstl a6@(-12)
X	jeq L43
X	moveq #16,d2
X	addl a6,d2
X	movel d2,a6@(-4)
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-8)
XL43:
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d2
X	andl #2047,d2
X	movel d2,a6@(-12)
X	movel a6@(-8),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d2
X	andl #2047,d2
X	movel d2,a6@(-16)
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jls L44
X	moveq #1,d0
X	jra L40
XL44:
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jcc L45
X	moveq #-1,d0
X	jra L40
XL45:
X	movel a6@(-4),a0
X	movel a0@,d2
X	andl #1048575,d2
X	movel d2,a6@(-12)
X	movel a6@(-8),a0
X	movel a0@,d2
X	andl #1048575,d2
X	movel d2,a6@(-16)
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jls L46
X	moveq #1,d0
X	jra L40
XL46:
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jcc L47
X	moveq #-1,d0
X	jra L40
XL47:
X	movel a6@(-4),a0
X	movel a0@(4),a6@(-12)
X	movel a6@(-8),a0
X	movel a0@(4),a6@(-16)
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jls L48
X	moveq #1,d0
X	jra L40
XL48:
X	movel a6@(-12),d2
X	cmpl a6@(-16),d2
X	jcc L49
X	moveq #-1,d0
X	jra L40
XL49:
X	moveq #0,d0
X	jra L40
XL40:
X	movel a6@(-20),d2
X	unlk a6
X	rts
X	.even
X.globl ___fixunsdfsi
X___fixunsdfsi:
X	link a6,#-20
X	moveml #0x3000,sp at -
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-4)
X	movel a6@(-4),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	movel d1,a6@(-8)
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d2
X	andl #2047,d2
X	movel d2,a6@(-12)
X	movel a6@(-4),a0
X	movel a0@,d0
X	andl #1048575,d0
X	movel d0,d2
X	addl #1048576,d2
X	movel d2,a6@(-16)
X	movel a6@(-4),a0
X	movel a0@(4),a6@(-20)
X	tstl a6@(-12)
X	jne L51
X	moveq #0,d0
X	jra L50
XL51:
X	cmpl #1022,a6@(-12)
X	jgt L52
X	moveq #0,d0
X	jra L50
XL52:
X	tstl a6@(-8)
X	jeq L53
X	moveq #0,d0
X	jra L50
XL53:
X	movel a6@(-16),d2
X	moveq #10,d3
X	asll d3,d2
X	movel d2,a6@(-16)
X	movel a6@(-20),d0
X	moveq #22,d2
X	asrl d2,d0
X	addl d0,a6@(-16)
X	movel #1053,d0
X	subl a6@(-12),d0
X	movel a6@(-16),d2
X	asrl d0,d2
X	movel d2,a6@(-16)
X	movel a6@(-16),d0
X	jra L50
XL50:
X	moveml a6@(-28),#0xc
X	unlk a6
X	rts
X	.even
X.globl ___fixdfsi
X___fixdfsi:
X	link a6,#-4
X	movel a6,a1
X	addqw #8,a1
X	movel a1,a6@(-4)
X	movel a6@(-4),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	tstl d1
X	jeq L55
X	movel a6@(-4),a0
X	andb #127,a0@
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixunsdfsi
X	negl d0
X	jra L54
XL55:
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixunsdfsi
X	jra L54
XL54:
X	unlk a6
X	rts
X	.even
X.globl ___fixdfdi
X___fixdfdi:
X	link a6,#-8
X	movel d2,sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixdfsi
X	addqw #8,sp
X	movel d0,a6@(-8)
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixdfsi
X	addqw #8,sp
X	movel d0,d0
X	tstl d0
X	jge L57
X	moveq #-1,d2
X	movel d2,a6@(-4)
X	jra L58
XL57:
X	clrl a6@(-4)
XL58:
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L56
XL56:
X	movel a6@(-12),d2
X	unlk a6
X	rts
X	.even
X.globl ___floatsidf
X___floatsidf:
X	link a6,#-24
X	moveml #0x3000,sp at -
X	moveq #-16,d2
X	addl a6,d2
X	movel d2,a6@(-20)
X	tstl a6@(8)
X	jne L60
X	moveq #-16,d2
X	addl a6,d2
X	movel d2,a6@(-24)
X	movel a6@(-24),a0
X	clrl a0@
X	movel a6@(-24),a0
X	addqw #4,a0
X	clrl a0@
X	movel a6@(-16),d0
X	movel a6@(-12),d1
X	jra L59
XL60:
X	tstl a6@(8)
X	jge L61
X	movel a6@(-20),a0
X	orb #128,a0@
X	negl a6@(8)
X	jra L62
XL61:
X	movel a6@(-20),a0
X	andb #127,a0@
XL62:
X	movel a6@(8),a6@(-8)
X	movel #1054,a6@(-4)
XL63:
X	tstl a6@(-8)
X	jle L64
XL65:
X	subql #1,a6@(-4)
X	movel a6@(-8),d0
X	asll #1,d0
X	movel d0,d0
X	movel d0,a6@(-8)
X	jra L63
XL64:
X	movel a6@(-8),d2
X	asll #1,d2
X	movel d2,a6@(-8)
X	movel a6@(-20),a0
X	movew a6@(-2),d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(-20),a0
X	movel a6@(-8),d0
X	moveq #12,d2
X	asrl d2,d0
X	andl #1048575,d0
X	andl #-1048576,a0@
X	orl d0,a0@
X	movel a6@(-20),a0
X	movel a6@(-8),d2
X	moveq #20,d3
X	asll d3,d2
X	movel d2,a0@(4)
X	movel a6@(-16),d0
X	movel a6@(-12),d1
X	jra L59
XL59:
X	moveml a6@(-32),#0xc
X	unlk a6
X	rts
X	.even
XLC1:
X	.double 0r6.55360000000000000000e+04
X	.even
X.globl ___floatdidf
X___floatdidf:
X	link a6,#0
X	moveml #0x3f00,sp at -
X	movel a6@(12),sp at -
X	jbsr ___floatsidf
X	addqw #4,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d2
X	movel d1,d3
X	movel #___muldf3,d0
X	movel LC1+4,sp at -
X	movel LC1,sp at -
X	movel d3,sp at -
X	movel d2,sp at -
X	jbsr ___muldf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel #___muldf3,d4
X	movel LC1+4,sp at -
X	movel LC1,sp at -
X	movel d1,sp at -
X	movel d0,sp at -
X	jbsr ___muldf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d2
X	movel d1,d3
X	movel a6@(8),sp at -
X	jbsr ___floatsidf
X	addqw #4,sp
X	movel d0,d6
X	movel d1,d7
X	clrl sp at -
X	clrl sp at -
X	movel d7,sp at -
X	movel d6,sp at -
X	jbsr ___cmpdf2
X	addw #16,sp
X	tstl d0
X	jge L67
X	movel #___adddf3,d0
X	clrl sp at -
X	movel #1106247680,sp at -
X	movel d7,sp at -
X	movel d6,sp at -
X	jbsr ___adddf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d6
X	movel d1,d7
XL67:
X	movel d6,d4
X	movel d7,d5
X	movel #___adddf3,d0
X	movel d5,sp at -
X	movel d4,sp at -
X	movel d3,sp at -
X	movel d2,sp at -
X	jbsr ___adddf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L66
XL66:
X	moveml a6@(-24),#0xfc
X	unlk a6
X	rts
X	.even
XLC2:
X	.single 0r0.00000000000000000000e+00
X	.even
X.globl ___dtof
X___dtof:
X	link a6,#-16
X	movel d2,sp at -
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-4)
X	movel a6,d2
X	subql #8,d2
X	movel d2,a6@(-12)
X	movel a6@(-12),a0
X	movel a6@(-4),a1
X	moveb a1@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	moveb d1,d0
X	andb #1,d0
X	lslb #7,d0
X	andb #127,a0@
X	orb d0,a0@
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #2047,d0
X	tstl d0
X	jne L69
X	movel LC2,d0
X	jra L68
XL69:
X	movel a6@(-12),a0
X	movel a6@(-4),a1
X	movew a1@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #2047,d0
X	addl #-896,d0
X	andw #255,d0
X	lslw #7,d0
X	andw #32895,a0@
X	orw d0,a0@
X	movel a6@(-4),a0
X	movel a0@,d0
X	andl #1048575,d0
X	asll #4,d0
X	movel a6@(-4),a0
X	movel a0@(4),d1
X	andl #-268435456,d1
X	moveq #28,d2
X	lsrl d2,d1
X	movel d0,d2
X	addl d1,d2
X	movel d2,a6@(-16)
X	addql #1,a6@(-16)
X	cmpl #16777216,a6@(-16)
X	jne L70
X	movel a6@(-12),d0
X	movel d0,a0
X	movew a0@,d0
X	lsrw #7,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #255,d0
X	movew a0@,d0
X	lsrw #7,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #255,d0
X	addql #1,d0
X	andw #255,d0
X	lslw #7,d0
X	andw #32895,a0@
X	orw d0,a0@
XL70:
X	movel a6@(-12),a0
X	movel a6@(-16),d0
X	asrl #1,d0
X	andl #8388607,d0
X	andl #-8388608,a0@
X	orl d0,a0@
X	movel a6@(-8),d0
X	jra L68
XL68:
X	movel a6@(-20),d2
X	unlk a6
X	rts
X	.even
X.globl ___truncdfsf2
X___truncdfsf2:
X	link a6,#-4
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___dtof
X	movel d0,d0
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L71
XL71:
X	unlk a6
X	rts
X	.even
X.globl ___ftod
X___ftod:
X	link a6,#-16
X	moveml #0x3000,sp at -
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-12)
X	movel a6,d2
X	subql #8,d2
X	movel d2,a6@(-16)
X	movel a6@(-12),a0
X	movew a0@,d0
X	lsrw #7,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #255,d0
X	tstl d0
X	jne L73
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L72
XL73:
X	movel a6@(-16),a0
X	movel a6@(-12),a1
X	moveb a1@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	moveb d1,d0
X	andb #1,d0
X	lslb #7,d0
X	andb #127,a0@
X	orb d0,a0@
X	movel a6@(-16),a0
X	movel a6@(-12),a1
X	movew a1@,d0
X	lsrw #7,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #255,d0
X	addl #896,d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(-16),a0
X	movel a6@(-12),a1
X	movel a1@,d0
X	andl #8388607,d0
X	asrl #3,d0
X	andl #1048575,d0
X	andl #-1048576,a0@
X	orl d0,a0@
X	movel a6@(-16),a0
X	movel a6@(-12),a1
X	movel a1@,d0
X	andl #8388607,d0
X	moveq #7,d2
X	andl d2,d0
X	movel d0,d2
X	moveq #29,d3
X	asll d3,d2
X	movel d2,a0@(4)
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L72
XL72:
X	moveml a6@(-24),#0xc
X	unlk a6
X	rts
X	.even
X.globl ___extendsfdf2
X___extendsfdf2:
X	link a6,#-12
X	movel a6@(8),sp at -
X	jbsr ___ftod
X	jra L74
XL74:
X	unlk a6
X	rts
X	.even
X.globl ___divdf3
X___divdf3:
X	link a6,#-68
X	moveml #0x3f00,sp at -
X	movel a6@(8),a6@(-8)
X	movel a6@(12),a6@(-4)
X	movel a6@(16),a6@(-16)
X	movel a6@(20),a6@(-12)
X	movel a6,d7
X	subql #8,d7
X	movel d7,a6@(-20)
X	moveq #-16,d7
X	addl a6,d7
X	movel d7,a6@(-24)
X	movel a6@(-20),a0
X	movew a0@,d0
X	lsrw #4,d0
X	andw #2047,d0
X	movew d0,a6@(-26)
X	movel a6@(-24),a0
X	movew a0@,d0
X	lsrw #4,d0
X	andw #2047,d0
X	movew d0,a6@(-28)
X	clrw a6@(-30)
X	tstw a6@(-26)
X	jeq L77
X	tstw a6@(-28)
X	jeq L77
X	jra L76
XL77:
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L75
XL76:
X	movel a6@(-20),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	clrw d1
X	moveb d0,d1
X	movew d1,d0
X	addw a6@(-30),d0
X	movel a6@(-24),a0
X	moveb a0@,d1
X	lsrb #7,d1
X	clrw a6@(-66)
X	moveb d1,a6@(-65)
X	movew d0,d7
X	subw a6@(-66),d7
X	movew d7,a6@(-30)
X	movel a6@(-20),a0
X	movel a0@,d0
X	andl #1048575,d0
X	movel d0,d7
X	bset #20,d7
X	movel d7,a6@(-34)
X	movel a6@(-20),a0
X	movel a0@(4),a6@(-38)
X	clrl a6@(-42)
X	clrl a6@(-46)
X	movel a6@(-24),a0
X	movel a0@,d3
X	andl #1048575,d3
X	bset #20,d3
X	movel a6@(-24),a0
X	movel a0@(4),d4
X	moveq #0,d5
X	moveq #0,d6
X	movel #1048576,a6@(-62)
X	clrw a6@(-64)
X	clrl a6@(-58)
X	clrl a6@(-54)
X	clrl a6@(-50)
X	cmpl a6@(-34),d3
X	jcc L78
X	jra L79
XL78:
X	cmpl a6@(-34),d3
X	jls L80
X	jra L81
XL80:
X	cmpl a6@(-38),d4
X	jhi L82
X	jra L79
XL82:
X	nop
XL81:
X	subqw #1,a6@(-26)
X#APP
X	asrl #1,d3
X	roxrl #1,d4
X	roxrl #1,d5
X	roxrl #1,d6
X#NO_APP
XL79:
X	nop
XL83:
X	cmpw #2,a6@(-64)
X	jne L86
X	movel a6@(-62),d0
X	andl #1073741824,d0
X	tstl d0
X	jeq L86
X	jra L84
XL86:
X	cmpl a6@(-34),d3
X	jcc L87
X	jra L88
XL87:
X	cmpl a6@(-34),d3
X	jls L89
X	jra L90
XL89:
X	cmpl a6@(-38),d4
X	jcc L91
X	jra L88
XL91:
X	cmpl a6@(-38),d4
X	jls L92
X	jra L90
XL92:
X	cmpl a6@(-42),d5
X	jcc L93
X	jra L88
XL93:
X	cmpl a6@(-42),d5
X	jls L94
X	jra L90
XL94:
X	cmpl a6@(-46),d6
X	jls L95
X	jra L90
XL95:
X	nop
XL88:
X	movew a6@(-64),d2
X	extl d2
X	movel d2,d0
X	asll #2,d0
X	addl a6,d0
X	moveq #-58,d7
X	addl d0,d7
X	movel d7,a0
X	movel d2,d0
X	asll #2,d0
X	addl a6,d0
X	moveq #-58,d7
X	addl d0,d7
X	movel d7,a1
X	movel a1@,d7
X	orl a6@(-62),d7
X	movel d7,a0@
X	movel a6@(-46),d0
X#APP
X	subl d6,d0
X#NO_APP
X	movel d0,a6@(-46)
X	movel a6@(-42),d0
X#APP
X	subxl d5,d0
X#NO_APP
X	movel d0,a6@(-42)
X	movel a6@(-38),d0
X#APP
X	subxl d4,d0
X#NO_APP
X	movel d0,a6@(-38)
X	movel a6@(-34),d0
X#APP
X	subxl d3,d0
X#NO_APP
X	movel d0,a6@(-34)
XL90:
X#APP
X	asrl #1,d3
X	roxrl #1,d4
X	roxrl #1,d5
X	roxrl #1,d6
X#NO_APP
X	movel a6@(-62),d7
X	lsrl #1,d7
X	movel d7,a6@(-62)
X	tstl a6@(-62)
X	jne L96
X	movel #-2147483648,a6@(-62)
X	addqw #1,a6@(-64)
XL96:
XL85:
X	jra L83
XL84:
X	tstl a6@(-50)
X	jeq L97
X	moveq #0,d3
X	moveq #1,d4
X	addql #1,a6@(-54)
X	tstl a6@(-54)
X	jne L98
X	addql #1,a6@(-58)
XL98:
X	movel a6@(-58),d0
X	andl #2097152,d0
X	tstl d0
X	jeq L99
X	subqw #1,a6@(-26)
XL99:
XL97:
X	movew a6@(-26),d0
X	subw a6@(-28),d0
X	movew d0,d7
X	addw #1023,d7
X	movew d7,a6@(-26)
X	movel a6@(-20),a0
X	movew a6@(-30),d0
X	extl d0
X	andb #1,d0
X	lslb #7,d0
X	andb #127,a0@
X	orb d0,a0@
X	movel a6@(-20),a0
X	movew a6@(-26),d0
X	extl d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(-20),a0
X	movel a6@(-58),d0
X	andl #1048575,d0
X	andl #1048575,d0
X	andl #-1048576,a0@
X	orl d0,a0@
X	movel a6@(-20),a0
X	movel a6@(-54),a0@(4)
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L75
XL75:
X	moveml a6@(-92),#0xfc
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of dflonum.s failed'
fi
# ============= divsi3.s ==============
if test -f 'divsi3.s' -a X"$1" != X"-c"; then
	echo 'x - skipping divsi3.s (File already exists)'
else
echo 'x - extracting divsi3.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'divsi3.s' &&
X	.text			| cjpurcell 30Jul89
X	.even			|
X	.globl __divsi3		|
X	.globl ___divsi3	|
X__divsi3:			|
X___divsi3:			|
X	link a6,#-2		| 2 bytes of local
X	clrw a6@(-2)		| zap the local; it's our neg flg
X				|
X	movel a6@(12),d0	| get b
X	bge divs1		| pos, ok
X	negl d0			| negate it
X	addqw #1,a6@(-2)	| and inc the flag
Xdivs1:				|
X	movel d0,sp at -		| push adjusted b
X	movel a6@(8),d0		| get a
X	bge divs2		| pos, ok
X	negl d0			| negate it
X	subqw #1,a6@(-2)	| and dec the flag
Xdivs2:				|
X	movel d0,sp at -		| push adjusted a
X	jbsr ___udivsi3		| do an unsigned div
X	addql #8,sp		| flush args
X	tstw a6@(-2)		| flag set?
X	beq divs3		| nope, go ahead and return
X	negl d0			| negate the result
Xdivs3:				|
X				|
X	unlk a6			| unlink the frame
X	rts			| done!
SHAR_EOF
true || echo 'restore of divsi3.s failed'
fi
# ============= flonum.h ==============
if test -f 'flonum.h' -a X"$1" != X"-c"; then
	echo 'x - skipping flonum.h (File already exists)'
else
echo 'x - extracting flonum.h (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'flonum.h' &&
X
X/* Defs and macros for floating point code.  This stuff is heavily based
X   on Scott McCauley's code, except that this version works :-} */
X
X
X/* These definitions work for machines where an SF value is
X   returned in the same register as an int.  */
X
X#ifndef SFVALUE  
X#define SFVALUE int
X#endif
X
X#ifndef INTIFY
X#define INTIFY(FLOATVAL)  (intify.f = (FLOATVAL), intify.i)
X#endif
X
X/* quasi-IEEE floating point number definitions */
X
Xstruct bitfloat {
X	unsigned sign : 1;
X	unsigned exp : 8;
X	unsigned mant : 23;
X};
X
Xstruct bitdouble {
X	unsigned sign : 1;
X	unsigned exp : 11;
X	unsigned mant1 : 20;
X	unsigned long mant2;
X};
X
Xunion double_di { double d; int i[2]; };
Xunion flt_or_int { int i; float f; };
X
X#ifdef WORDS_BIG_ENDIAN
X#define HIGH 0
X#define LOW 1
X#else
X#define HIGH 1
X#define LOW 0
X#endif
X
X/* start of symbolic asm definitions */
X
X/* you may have to change the g's to d's if you start getting
X   illegal operands from as */
X
X#define MUL(a, b) asm volatile ("mulu %2,%0" : "=d" (b) : "0" (b) , "g" (a))
X#define DIV(a, b) asm volatile ("divu %2,%0" : "=d" (b) : "0" (b) , "g" (a))
X#define SWAP(a) asm volatile ("swap %0" : "=r" (a) : "r" (a) , "r" (a) )
X
X#define ASL2(r1, r2) { asm volatile ("asll #1,%0" : "=d" (r2) : "d" (r2));\
X		       asm volatile ("roxll #1,%0" : "=d" (r1) : "d" (r1)); }
X#define ASL3(r1, r2, r3) { asm volatile ("asll #1,%0" : "=d" (r3) : "d" (r3));\
X			   asm volatile ("roxll #1,%0" : "=d" (r2) : "d" (r2));\
X			   asm volatile ("roxll #1,%0" : "=d" (r1) : "d" (r1)); }
X
X#define ASR2(r1, r2) { asm volatile ("asrl #1,%0" : "=d" (r1) : "d" (r1));\
X		       asm volatile ("roxrl #1,%0" : "=d" (r2) : "d" (r2)); }
X#define ASR3(r1, r2, r3) { asm volatile ("asrl #1,%0" : "=d" (r1) : "d" (r1));\
X			   asm volatile ("roxrl #1,%0" : "=d" (r2) : "d" (r2));\
X			   asm volatile ("roxrl #1,%0" : "=d" (r3) : "d" (r3)); }
X#define ASR4(r1, r2, r3, r4) { asm volatile ("asrl #1,%0" : "=d" (r1) : "d" (r1));\
X			       asm volatile ("roxrl #1,%0" : "=d" (r2) : "d" (r2));\
X			       asm volatile ("roxrl #1,%0" : "=d" (r3) : "d" (r3));\
X			       asm volatile ("roxrl #1,%0" : "=d" (r4) : "d" (r4)); }
X
X#define ADD2(r1, r2, r3, r4) \
X	{ asm volatile ("addl %2,%0": "=g" (r4) : "0" (r4) , "g" (r2)); \
X	  asm volatile ("addxl %2,%0": "=g" (r3) : "0" (r3) , "g" (r1)); }
X
X/* y <- y - x  */
X#define SUB3(x1, x2, x3, y1, y2, y3) \
X	{ asm volatile ("subl %2,%0": "=g" (y3) : "g" (y3) , "d" (x3)); \
X	  asm volatile ("subxl %2,%0": "=g" (y2) : "g" (y2) , "d" (x2));\
X	  asm volatile ("subxl %2,%0": "=g" (y1) : "g" (y1) , "d" (x1)); }
X
X/* sub4 here is rather complex, as the compiler is overwhelmed by me wanting
X   to have 8 data registers allocated for mantissa accumulators.  Help it out
X   by declaring a temp that it can move stuff in and out of.  */
X#define SUB4(x1, x2, x3, x4, y1, y2, y3, y4) \
X	{ register long temp = y4; \
X	  asm volatile ("subl %2,%0": "=d" (temp) : "d" (temp) , "d" (x4)); \
X	  y4 = temp; temp = y3; \
X	  asm volatile ("subxl %2,%0": "=d" (temp) : "d" (temp) , "d" (x3));\
X	  y3 = temp; temp = y2; \
X	  asm volatile ("subxl %2,%0": "=d" (temp) : "d" (temp) , "d" (x2));\
X	  y2 = temp; temp = y1; \
X	  asm volatile ("subxl %2,%0": "=d" (temp) : "d" (temp) , "d" (x1));\
X	  y1 = temp; }
X
X#define NEG(r1, r2) { asm volatile ("negl %0" : "=d" (r2) : "d" (r2)); \
X		      asm volatile ("negxl %0" : "=d" (r1) : "d" (r1)); } 
X
X/* switches for which routines to compile.  All the single-float and
Xlong-int arithmetic routines are turned off here, as they were all
Xdone in assembly language last year.  */
X
X/*
X#define L_umulsi3
X#define L_mulsi3
X#define L_udivsi3
X#define L_divsi3
X#define L_umodsi3
X#define L_modsi3
X#define L_lshrsi3
X#define L_lshlsi3
X#define L_ashrsi3
X#define L_ashlsi3
X*/
X#define L_divdf3
X#define L_muldf3
X#define L_negdf2
X#define L_adddf3
X#define L_subdf3
X#define L_cmpdf2
X#define L_fixunsdfsi
X#define L_fixunsdfdi
X#define L_fixdfdi
X#define L_floatsidf
X#define L_floatdidf
X/*
X#define L_addsf3
X#define L_negsf2
X#define L_subsf3
X#define L_cmpsf2
X#define L_mulsf3
X#define L_divsf3
X*/
X#define L_truncdfsf2
X#define L_extendsfdf2
X
SHAR_EOF
true || echo 'restore of flonum.h failed'
fi
# ============= frexp.c ==============
if test -f 'frexp.c' -a X"$1" != X"-c"; then
	echo 'x - skipping frexp.c (File already exists)'
else
echo 'x - extracting frexp.c (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'frexp.c' &&
X/*
X * double frexp(value, eptr)
X * double value;
X * int *eptr;
X *
X * returns significand (|significand| < 1)
X *	   in *eptr returns n such that value = significand * 2**n
X *
X * ++jrb	bammi at dsrgsun.ces.cwru.edu
X *
X */
X#include "flonum.h"
X
X#define BIAS 1023
X#define B1   1022
X
Xdouble frexp(value, eptr)
Xdouble value;
X#ifdef SHORTLIB
Xshort *eptr;
X#else
Xint *eptr;
X#endif
X{
X    struct bitdouble *res = (struct bitdouble *) &value;
X    unsigned int expo, sign;
X#ifdef __STDC__
X    double norm( double, int, int, int );
X#else
X    extern double norm();
X#endif
X    
X    expo = res->exp;
X    sign = res->sign;
X    res->exp = 0;
X    res->sign = 0;
X    *eptr = expo - B1;
X    if(expo != 0)
X	res->exp = 1;	/* put in hidden bit */
X    else
X	if((res->mant1 == 0) && (res->mant2 == 0))
X	{
X	    *eptr = 0;
X	    return 0.0;	/* no point in normalizing (exp will be wrong) */
X	}
X
X    return norm(value, B1, sign, 0);
X}
X
X#ifdef TEST /* combined test for frexp and ldexp */
X	/* 32 bit ints for this test please */
X
X#ifdef __MSHORT__
X#error "please run this test in 32 bit int mode"
X#endif
X
X#include <stdio.h>
X
X#define NTEST	100000L
X#define MAXRAND 0x7fffffffL	/* assuming 32 bit ints */
X#define ABS(X)	( (X < 0) ? (-X) : X )
Xextern long rand();
X
Xint main()
X{
X    double sig, e, expected, result, max_abs_err;
X    int twoexp;
X    register long i;
X    register long errs = 0;
X    register int s;
X#ifdef __STDC__
X    double ldexp(double, int);
X    double frexp(double, int *);
X#else
X    extern double ldexp();
X    extern double frexp();
X#endif
X
X    max_abs_err = 0.0;
X    for(i = 0; i < NTEST; i++)
X    {
X	expected = ((double)(s = rand()) + (double)rand())/(double)(MAXRAND);
X	if(s > (MAXRAND >> 1)) expected = -expected;
X
X	sig = frexp(expected, &twoexp);
X	if(ABS(sig) >= 1.0)
X	{
X	    printf("sig > 1, %.4e: %.4e  %d\n", expected, sig, twoexp);
X	    errs++;
X	    continue;
X	}
X	
X	result = ldexp(sig, twoexp);
X	
X	e = (expected == 0.0) ? result : (result - expected)/expected;
X	if(e < 0) e = (-e);
X	if(e > 1.0e-6)
X	{
X	    printf("%.8e: %.8e  E %.8e\n",
X		   expected, result, e);
X	    errs++;
X	}
X	if (e > max_abs_err) max_abs_err = e;
X    }
X    
X    printf("%ld Error(s), Max abs err %.14e\n", errs, max_abs_err);
X    return errs;
X}
X#endif /* TEST */
SHAR_EOF
true || echo 'restore of frexp.c failed'
fi
# ============= frexp.s ==============
if test -f 'frexp.s' -a X"$1" != X"-c"; then
	echo 'x - skipping frexp.s (File already exists)'
else
echo 'x - extracting frexp.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'frexp.s' &&
X#NO_APP
Xgcc_compiled.:
X.text
X	.even
XLC0:
X	.double 0r0.00000000000000000000e+00
X	.even
X.globl _frexp
X_frexp:
X	link a6,#-12
X	movel d2,sp at -
X	movel a6,d2
X	addql #8,d2
X	movel d2,a6@(-4)
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d2
X	andl #2047,d2
X	movel d2,a6@(-8)
X	movel a6@(-4),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	movel d1,a6@(-12)
X	movel a6@(-4),a0
X	andw #32783,a0@
X	movel a6@(-4),a0
X	andb #127,a0@
X	movel a6@(16),a0
X	movel a6@(-8),d2
X	addl #-1022,d2
X	movel d2,a0@
X	tstl a6@(-8)
X	jeq L2
X	movel a6@(-4),a0
X	andw #32783,a0@
X	orw #16,a0@
X	jra L3
XL2:
X	movel a6@(-4),a0
X	movel a0@,d0
X	andl #1048575,d0
X	tstl d0
X	jne L4
X	movel a6@(-4),a0
X	tstl a0@(4)
X	jne L4
X	movel a6@(16),a0
X	clrl a0@
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L1
XL4:
XL3:
X	clrl sp at -
X	movel a6@(-12),sp at -
X	pea 1022:w
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr _norm
X	jra L1
XL1:
X	movel a6@(-16),d2
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of frexp.s failed'
fi
# ============= gnulib00.s ==============
if test -f 'gnulib00.s' -a X"$1" != X"-c"; then
	echo 'x - skipping gnulib00.s (File already exists)'
else
echo 'x - extracting gnulib00.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'gnulib00.s' &&
X#NO_APP
Xgcc_compiled.:
X.text
X	.even
X.globl __eprintf
X__eprintf:
X	link a6,#0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	moveq #52,d0
X	addl #__iob,d0
X	movel d0,sp at -
X	jbsr _fprintf
XL1:
X	unlk a6
X	rts
X	.even
X.globl __umulsi3
X__umulsi3:
X	link a6,#0
X	movel #___mulsi3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___mulsi3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,d0
X	jra L2
XL2:
X	unlk a6
X	rts
X	.even
X.globl __mulsi3
X__mulsi3:
X	link a6,#0
X	movel #___mulsi3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___mulsi3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,d0
X	jra L3
XL3:
X	unlk a6
X	rts
X	.even
X.globl __udivsi3
X__udivsi3:
X	link a6,#0
X	movel #___udivsi3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___udivsi3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,d0
X	jra L4
XL4:
X	unlk a6
X	rts
X	.even
X.globl __divsi3
X__divsi3:
X	link a6,#0
X	movel #___divsi3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___divsi3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,d0
X	jra L5
XL5:
X	unlk a6
X	rts
X	.even
X.globl __umodsi3
X__umodsi3:
X	link a6,#0
X	movel #___umodsi3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___umodsi3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,d0
X	jra L6
XL6:
X	unlk a6
X	rts
X	.even
X.globl __modsi3
X__modsi3:
X	link a6,#0
X	movel #___modsi3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___modsi3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,d0
X	jra L7
XL7:
X	unlk a6
X	rts
X	.even
X.globl __lshrsi3
X__lshrsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	lsrl d1,d0
X	jra L8
XL8:
X	unlk a6
X	rts
X	.even
X.globl __lshlsi3
X__lshlsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	lsll d1,d0
X	jra L9
XL9:
X	unlk a6
X	rts
X	.even
X.globl __ashrsi3
X__ashrsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	asrl d1,d0
X	jra L10
XL10:
X	unlk a6
X	rts
X	.even
X.globl __ashlsi3
X__ashlsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	asll d1,d0
X	jra L11
XL11:
X	unlk a6
X	rts
X	.even
X.globl __divdf3
X__divdf3:
X	link a6,#0
X	movel #___divdf3,d0
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___divdf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L12
XL12:
X	unlk a6
X	rts
X	.even
X.globl __muldf3
X__muldf3:
X	link a6,#0
X	movel #___muldf3,d0
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___muldf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L13
XL13:
X	unlk a6
X	rts
X	.even
X.globl __negdf2
X__negdf2:
X	link a6,#0
X	movel #___negdf2,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___negdf2
X	addqw #8,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L14
XL14:
X	unlk a6
X	rts
X	.even
X.globl __adddf3
X__adddf3:
X	link a6,#0
X	movel #___adddf3,d0
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___adddf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L15
XL15:
X	unlk a6
X	rts
X	.even
X.globl __subdf3
X__subdf3:
X	link a6,#0
X	movel #___subdf3,d0
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___subdf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L16
XL16:
X	unlk a6
X	rts
X	.even
X.globl __cmpdf2
X__cmpdf2:
X	link a6,#0
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___cmpdf2
X	addw #16,sp
X	tstl d0
X	jle L18
X	moveq #1,d0
X	jra L17
X	jra L19
XL18:
X	movel a6@(20),sp at -
X	movel a6@(16),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___cmpdf2
X	addw #16,sp
X	tstl d0
X	jge L20
X	moveq #-1,d0
X	jra L17
XL20:
XL19:
X	moveq #0,d0
X	jra L17
XL17:
X	unlk a6
X	rts
X	.even
X.globl __fixunsdfsi
X__fixunsdfsi:
X	link a6,#0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixunsdfsi
X	addqw #8,sp
X	movel d0,d0
X	jra L21
XL21:
X	unlk a6
X	rts
X	.even
X.globl __fixunsdfdi
X__fixunsdfdi:
X	link a6,#-8
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixunsdfsi
X	addqw #8,sp
X	movel d0,a6@(-8)
X	clrl a6@(-4)
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L22
XL22:
X	unlk a6
X	rts
X	.even
X.globl __fixdfsi
X__fixdfsi:
X	link a6,#0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixdfsi
X	addqw #8,sp
X	movel d0,d0
X	jra L23
XL23:
X	unlk a6
X	rts
X	.even
X.globl __fixdfdi
X__fixdfdi:
X	link a6,#-8
X	movel d2,sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixdfsi
X	addqw #8,sp
X	movel d0,a6@(-8)
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixdfsi
X	addqw #8,sp
X	movel d0,d0
X	tstl d0
X	jge L25
X	moveq #-1,d2
X	movel d2,a6@(-4)
X	jra L26
XL25:
X	clrl a6@(-4)
XL26:
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L24
XL24:
X	movel a6@(-12),d2
X	unlk a6
X	rts
X	.even
X.globl __floatsidf
X__floatsidf:
X	link a6,#0
X	movel a6@(8),sp at -
X	jbsr ___floatsidf
X	addqw #4,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L27
XL27:
X	unlk a6
X	rts
X	.even
XLC0:
X	.double 0r6.55360000000000000000e+04
X	.even
X.globl __floatdidf
X__floatdidf:
X	link a6,#0
X	moveml #0x3f00,sp at -
X	movel a6@(12),sp at -
X	jbsr ___floatsidf
X	addqw #4,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d2
X	movel d1,d3
X	movel #___muldf3,d0
X	movel LC0+4,sp at -
X	movel LC0,sp at -
X	movel d3,sp at -
X	movel d2,sp at -
X	jbsr ___muldf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel #___muldf3,d4
X	movel LC0+4,sp at -
X	movel LC0,sp at -
X	movel d1,sp at -
X	movel d0,sp at -
X	jbsr ___muldf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d2
X	movel d1,d3
X	movel a6@(8),sp at -
X	jbsr ___floatsidf
X	addqw #4,sp
X	movel d0,d6
X	movel d1,d7
X	clrl sp at -
X	clrl sp at -
X	movel d7,sp at -
X	movel d6,sp at -
X	jbsr ___cmpdf2
X	addw #16,sp
X	tstl d0
X	jge L29
X	movel #___adddf3,d0
X	clrl sp at -
X	movel #1106247680,sp at -
X	movel d7,sp at -
X	movel d6,sp at -
X	jbsr ___adddf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d6
X	movel d1,d7
XL29:
X	movel d6,d4
X	movel d7,d5
X	movel #___adddf3,d0
X	movel d5,sp at -
X	movel d4,sp at -
X	movel d3,sp at -
X	movel d2,sp at -
X	jbsr ___adddf3
X	addw #16,sp
X	movel d0,d0
X	movel d1,d1
X	movel d0,d0
X	movel d1,d1
X	jra L28
XL28:
X	moveml a6@(-24),#0xfc
X	unlk a6
X	rts
X	.even
X.globl __addsf3
X__addsf3:
X	link a6,#-4
X	movel #___addsf3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___addsf3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L30
XL30:
X	unlk a6
X	rts
X	.even
X.globl __negsf2
X__negsf2:
X	link a6,#-4
X	movel #___negsf2,d0
X	movel a6@(8),sp at -
X	jbsr ___negsf2
X	addqw #4,sp
X	movel d0,d0
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L31
XL31:
X	unlk a6
X	rts
X	.even
X.globl __subsf3
X__subsf3:
X	link a6,#-4
X	movel #___subsf3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___subsf3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L32
XL32:
X	unlk a6
X	rts
X	.even
X.globl __cmpsf2
X__cmpsf2:
X	link a6,#-4
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___cmpsf2
X	addqw #8,sp
X	tstl d0
X	jle L34
X	moveq #1,d0
X	jra L33
X	jra L35
XL34:
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___cmpsf2
X	addqw #8,sp
X	tstl d0
X	jge L36
X	moveq #-1,d0
X	jra L33
XL36:
XL35:
X	moveq #0,d0
X	jra L33
XL33:
X	unlk a6
X	rts
X	.even
X.globl __mulsf3
X__mulsf3:
X	link a6,#-4
X	movel #___mulsf3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___mulsf3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L37
XL37:
X	unlk a6
X	rts
X	.even
X.globl __divsf3
X__divsf3:
X	link a6,#-4
X	movel #___divsf3,d0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___divsf3
X	addqw #8,sp
X	movel d0,d0
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L38
XL38:
X	unlk a6
X	rts
X	.even
X.globl __truncdfsf2
X__truncdfsf2:
X	link a6,#-4
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___truncdfsf2
X	addqw #8,sp
X	movel d0,a6@(-4)
X	movel a6@(-4),d0
X	jra L39
XL39:
X	unlk a6
X	rts
X	.even
X.globl __extendsfdf2
X__extendsfdf2:
X	link a6,#-4
X	movel a6@(8),sp at -
X	jbsr ___extendsfdf2
X	addqw #4,sp
X	movel d0,d0
X	movel d1,d1
X	jra L40
XL40:
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of gnulib00.s failed'
fi
# ============= gnulib30.s ==============
if test -f 'gnulib30.s' -a X"$1" != X"-c"; then
	echo 'x - skipping gnulib30.s (File already exists)'
else
echo 'x - extracting gnulib30.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'gnulib30.s' &&
X#NO_APP
Xgcc_compiled.:
X.text
X	.even
X.globl __eprintf
X__eprintf:
X	link a6,#0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	moveq #40,d0
X	addl #__iob,d0
X	movel d0,sp at -
X	jbsr _fprintf
XL1:
X	unlk a6
X	rts
X	.even
X.globl __umulsi3
X__umulsi3:
X	link a6,#0
X	movel a6@(8),d0
X	mulsl a6@(12),d0
X	movel d0,d0
X	jra L2
XL2:
X	unlk a6
X	rts
X	.even
X.globl __mulsi3
X__mulsi3:
X	link a6,#0
X	movel a6@(8),d0
X	mulsl a6@(12),d0
X	movel d0,d0
X	jra L3
XL3:
X	unlk a6
X	rts
X	.even
X.globl __udivsi3
X__udivsi3:
X	link a6,#0
X	movel a6@(8),d0
X	divul a6@(12),d0
X	movel d0,d0
X	jra L4
XL4:
X	unlk a6
X	rts
X	.even
X.globl __divsi3
X__divsi3:
X	link a6,#0
X	movel a6@(8),d0
X	divsl a6@(12),d0
X	movel d0,d0
X	jra L5
XL5:
X	unlk a6
X	rts
X	.even
X.globl __umodsi3
X__umodsi3:
X	link a6,#0
X	movel a6@(8),d0
X	divull a6@(12),d1:d0
X	movel d1,d0
X	jra L6
XL6:
X	unlk a6
X	rts
X	.even
X.globl __modsi3
X__modsi3:
X	link a6,#0
X	movel a6@(8),d0
X	divsll a6@(12),d1:d0
X	movel d1,d0
X	jra L7
XL7:
X	unlk a6
X	rts
X	.even
X.globl __lshrsi3
X__lshrsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	lsrl d1,d0
X	jra L8
XL8:
X	unlk a6
X	rts
X	.even
X.globl __lshlsi3
X__lshlsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	lsll d1,d0
X	jra L9
XL9:
X	unlk a6
X	rts
X	.even
X.globl __ashrsi3
X__ashrsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	asrl d1,d0
X	jra L10
XL10:
X	unlk a6
X	rts
X	.even
X.globl __ashlsi3
X__ashlsi3:
X	link a6,#0
X	movel a6@(8),d0
X	movel a6@(12),d1
X	asll d1,d0
X	jra L11
XL11:
X	unlk a6
X	rts
X	.even
X.globl __divdf3
X__divdf3:
X	link a6,#0
X	fmoved a6@(8),fp0
X	fdivd a6@(16),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L12
XL12:
X	unlk a6
X	rts
X	.even
X.globl __muldf3
X__muldf3:
X	link a6,#0
X	fmoved a6@(8),fp0
X	fmuld a6@(16),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L13
XL13:
X	unlk a6
X	rts
X	.even
X.globl __negdf2
X__negdf2:
X	link a6,#0
X	fnegd a6@(8),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L14
XL14:
X	unlk a6
X	rts
X	.even
X.globl __adddf3
X__adddf3:
X	link a6,#0
X	fmoved a6@(8),fp0
X	faddd a6@(16),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L15
XL15:
X	unlk a6
X	rts
X	.even
X.globl __subdf3
X__subdf3:
X	link a6,#0
X	fmoved a6@(8),fp0
X	fsubd a6@(16),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L16
XL16:
X	unlk a6
X	rts
X	.even
X.globl __cmpdf2
X__cmpdf2:
X	link a6,#0
X	fmoved a6@(8),fp0
X	fcmpd a6@(16),fp0
X	fjngt L18
X	moveq #1,d0
X	jra L17
X	jra L19
XL18:
X	fmoved a6@(8),fp0
X	fcmpd a6@(16),fp0
X	fjnlt L20
X	moveq #-1,d0
X	jra L17
XL20:
XL19:
X	clrl d0
X	jra L17
XL17:
X	unlk a6
X	rts
X	.even
X.globl __fixunsdfsi
X__fixunsdfsi:
X	link a6,#0
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixunsdfsi
X	addqw #8,sp
X	movel d0,d0
X	jra L21
XL21:
X	unlk a6
X	rts
X	.even
X.globl __fixunsdfdi
X__fixunsdfdi:
X	link a6,#-8
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr ___fixunsdfsi
X	addqw #8,sp
X	movel d0,a6@(-8)
X	clrl a6@(-4)
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L22
XL22:
X	unlk a6
X	rts
X	.even
X.globl __fixdfsi
X__fixdfsi:
X	link a6,#0
X	fintrzd a6@(8),fp0
X	fmovel fp0,d0
X	jra L23
XL23:
X	unlk a6
X	rts
X	.even
X.globl __fixdfdi
X__fixdfdi:
X	link a6,#-8
X	movel d2,sp at -
X	fintrzd a6@(8),fp0
X	fmovel fp0,a6@(-8)
X	fintrzd a6@(8),fp0
X	fmovel fp0,d0
X	tstl d0
X	jge L25
X	moveq #-1,d2
X	movel d2,a6@(-4)
X	jra L26
XL25:
X	clrl a6@(-4)
XL26:
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L24
XL24:
X	movel a6@(-12),d2
X	unlk a6
X	rts
X	.even
X.globl __floatsidf
X__floatsidf:
X	link a6,#0
X	fmovel a6@(8),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L27
XL27:
X	unlk a6
X	rts
X	.even
XLC0:
X	.double 0r6.55360000000000000000e+04
X	.even
X.globl __floatdidf
X__floatdidf:
X	link a6,#0
X	fmovem #0x4,sp at -
X	fmovel a6@(12),fp0
X	fmuld LC0,fp0
X	fmuld LC0,fp0
X	fmovel a6@(8),fp1
X	ftstx fp1
X	fjge L29
X	faddd #0r4294967296,fp1
XL29:
X	fmovex fp0,fp2
X	faddx fp1,fp2
X	fmoved fp2,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L28
XL28:
X	fmovem a6@(-12),#0x20
X	unlk a6
X	rts
X	.even
X.globl __addsf3
X__addsf3:
X	link a6,#-4
X	fmoves a6@(8),fp0
X	fadds a6@(12),fp0
X	fmoves fp0,a6@(-4)
X	movel a6@(-4),d0
X	jra L30
XL30:
X	unlk a6
X	rts
X	.even
X.globl __negsf2
X__negsf2:
X	link a6,#-4
X	fnegs a6@(8),fp0
X	fmoves fp0,a6@(-4)
X	movel a6@(-4),d0
X	jra L31
XL31:
X	unlk a6
X	rts
X	.even
X.globl __subsf3
X__subsf3:
X	link a6,#-4
X	fmoves a6@(8),fp0
X	fsubs a6@(12),fp0
X	fmoves fp0,a6@(-4)
X	movel a6@(-4),d0
X	jra L32
XL32:
X	unlk a6
X	rts
X	.even
X.globl __cmpsf2
X__cmpsf2:
X	link a6,#-4
X	fmoves a6@(8),fp0
X	fcmps a6@(12),fp0
X	fjngt L34
X	moveq #1,d0
X	jra L33
X	jra L35
XL34:
X	fmoves a6@(8),fp0
X	fcmps a6@(12),fp0
X	fjnlt L36
X	moveq #-1,d0
X	jra L33
XL36:
XL35:
X	clrl d0
X	jra L33
XL33:
X	unlk a6
X	rts
X	.even
X.globl __mulsf3
X__mulsf3:
X	link a6,#-4
X	fmoves a6@(8),fp0
X	fsglmuls a6@(12),fp0
X	fmoves fp0,a6@(-4)
X	movel a6@(-4),d0
X	jra L37
XL37:
X	unlk a6
X	rts
X	.even
X.globl __divsf3
X__divsf3:
X	link a6,#-4
X	fmoves a6@(8),fp0
X	fsgldivs a6@(12),fp0
X	fmoves fp0,a6@(-4)
X	movel a6@(-4),d0
X	jra L38
XL38:
X	unlk a6
X	rts
X	.even
X.globl __truncdfsf2
X__truncdfsf2:
X	link a6,#-4
X	fmoved a6@(8),fp0
X	fmoves fp0,a6@(-4)
X	movel a6@(-4),d0
X	jra L39
XL39:
X	unlk a6
X	rts
X	.even
X.globl __extendsfdf2
X__extendsfdf2:
X	link a6,#-4
X	fmoves a6@(8),fp0
X	fmoved fp0,sp at -
X	movel sp at +,d0
X	movel sp at +,d1
X	jra L40
XL40:
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of gnulib30.s failed'
fi
# ============= gnulib_.c ==============
if test -f 'gnulib_.c' -a X"$1" != X"-c"; then
	echo 'x - skipping gnulib_.c (File already exists)'
else
echo 'x - extracting gnulib_.c (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'gnulib_.c' &&
X/* Subroutines needed by GCC output code on some machines.  */
X/* Compile this file with the Unix C compiler!  */
X
X#include "config.h"
X
X/* These definitions work for machines where an SF value is
X   returned in the same register as an int.  */
X
X#ifndef SFVALUE  
X#define SFVALUE int
X#endif
X
X#ifndef INTIFY
X#define INTIFY(FLOATVAL)  (intify.f = (FLOATVAL), intify.i)
X#endif
X
Xunion double_di { double d; int i[2]; };
Xunion flt_or_int { int i; float f; };
X
X#ifdef WORDS_BIG_ENDIAN
X#define HIGH 0
X#define LOW 1
X#else
X#define HIGH 1
X#define LOW 0
X#endif
X
X#define L_eprintf
X#define L_umulsi3
X#define L_mulsi3
X#define L_udivsi3
X#define L_divsi3
X#define L_umodsi3
X#define L_modsi3
X#define L_lshrsi3
X#define L_lshlsi3
X#define L_ashrsi3
X#define L_ashlsi3
X
X#ifdef L_eprintf
X#include <stdio.h>
X/* This is used by the `assert' macro.  */
Xvoid
X_eprintf (string, line)
X     char *string;
X     int line;
X{
X  fprintf (stderr, string, line);
X}
X#endif
X
X#ifdef L_umulsi3
X_umulsi3 (a, b)
X     unsigned a, b;
X{
X  return a * b;
X}
X#endif
X
X#ifdef L_mulsi3
X_mulsi3 (a, b)
X     int a, b;
X{
X  return a * b;
X}
X#endif
X
X#ifdef L_udivsi3
X_udivsi3 (a, b)
X     unsigned a, b;
X{
X  return a / b;
X}
X#endif
X
X#ifdef L_divsi3
X_divsi3 (a, b)
X     int a, b;
X{
X  return a / b;
X}
X#endif
X
X#ifdef L_umodsi3
X_umodsi3 (a, b)
X     unsigned a, b;
X{
X  return a % b;
X}
X#endif
X
X#ifdef L_modsi3
X_modsi3 (a, b)
X     int a, b;
X{
X  return a % b;
X}
X#endif
X
X#ifdef L_lshrsi3
X_lshrsi3 (a, b)
X     unsigned a, b;
X{
X  return a >> b;
X}
X#endif
X
X#ifdef L_lshlsi3
X_lshlsi3 (a, b)
X     unsigned a, b;
X{
X  return a << b;
X}
X#endif
X
X#ifdef L_ashrsi3
X_ashrsi3 (a, b)
X     int a, b;
X{
X  return a >> b;
X}
X#endif
X
X#ifdef L_ashlsi3
X_ashlsi3 (a, b)
X     int a, b;
X{
X  return a << b;
X}
X#endif
X
X
X#define L_divdf3
X#define L_muldf3
X#define L_negdf2
X#define L_adddf3
X#define L_subdf3
X#define L_cmpdf2
X
X#ifdef L_divdf3
Xdouble
X_divdf3 (a, b)
X     double a, b;
X{
X  return a / b;
X}
X#endif
X
X#ifdef L_muldf3
Xdouble
X_muldf3 (a, b)
X     double a, b;
X{
X  return a * b;
X}
X#endif
X
X#ifdef L_negdf2
Xdouble
X_negdf2 (a)
X     double a;
X{
X  return -a;
X}
X#endif
X
X#ifdef L_adddf3
Xdouble
X_adddf3 (a, b)
X     double a, b;
X{
X  return a + b;
X}
X#endif
X
X#ifdef L_subdf3
Xdouble
X_subdf3 (a, b)
X     double a, b;
X{
X  return a - b;
X}
X#endif
X
X#ifdef L_cmpdf2
Xint
X_cmpdf2 (a, b)
X     double a, b;
X{
X  if (a > b)
X    return 1;
X  else if (a < b)
X    return -1;
X  return 0;
X}
X#endif
X
X#define L_fixunsdfsi
X#define L_fixunsdfdi
X#define L_fixdfsi
X#define L_fixdfdi
X#define L_floatsidf
X#define L_floatdidf
X
X#ifdef L_fixunsdfsi
X_fixunsdfsi (a)
X     double a;
X{
X  return (unsigned int) a;
X}
X#endif
X
X#ifdef L_fixunsdfdi
Xdouble
X_fixunsdfdi (a)
X     double a;
X{
X  union double_di u;
X  u.i[LOW] = (unsigned int) a;
X  u.i[HIGH] = 0;
X  return u.d;
X}
X#endif
X
X#ifdef L_fixdfsi
X_fixdfsi (a)
X     double a;
X{
X  return (int) a;
X}
X#endif
X
X#ifdef L_fixdfdi
Xdouble
X_fixdfdi (a)
X     double a;
X{
X  union double_di u;
X  u.i[LOW] = (int) a;
X  u.i[HIGH] = (int) a < 0 ? -1 : 0;
X  return u.d;
X}
X#endif
X
X#ifdef L_floatsidf
Xdouble
X_floatsidf (a)
X     int a;
X{
X  return (double) a;
X}
X#endif
X
X#ifdef L_floatdidf
Xdouble
X_floatdidf (u)
X     union double_di u;
X{
X  register double hi
X    = ((double) u.i[HIGH]) * (double) 0x10000 * (double) 0x10000;
X  register double low = (unsigned int) u.i[LOW];
X  return hi + low;
X}
X#endif
X
X
X#define L_addsf3
X#define L_negsf2
X#define L_subsf3
X#define L_cmpsf2
X#define L_mulsf3
X#define L_divsf3
X#define L_truncdfsf2
X#define L_extendsfdf2
X
X#ifdef L_addsf3
XSFVALUE
X_addsf3 (a, b)
X     union flt_or_int a, b;
X{
X  union flt_or_int intify;
X  return INTIFY (a.f + b.f);
X}
X#endif
X
X#ifdef L_negsf2
XSFVALUE
X_negsf2 (a)
X     union flt_or_int a;
X{
X  union flt_or_int intify;
X  return INTIFY (-a.f);
X}
X#endif
X
X#ifdef L_subsf3
XSFVALUE
X_subsf3 (a, b)
X     union flt_or_int a, b;
X{
X  union flt_or_int intify;
X  return INTIFY (a.f - b.f);
X}
X#endif
X
X#ifdef L_cmpsf2
XSFVALUE
X_cmpsf2 (a, b)
X     union flt_or_int a, b;
X{
X  union flt_or_int intify;
X  if (a.f > b.f)
X    return 1;
X  else if (a.f < b.f)
X    return -1;
X  return 0;
X}
X#endif
X
X#ifdef L_mulsf3
XSFVALUE
X_mulsf3 (a, b)
X     union flt_or_int a, b;
X{
X  union flt_or_int intify;
X  return INTIFY (a.f * b.f);
X}
X#endif
X
X#ifdef L_divsf3
XSFVALUE
X_divsf3 (a, b)
X     union flt_or_int a, b;
X{
X  union flt_or_int intify;
X  return INTIFY (a.f / b.f);
X}
X#endif
X
X#ifdef L_truncdfsf2
XSFVALUE
X_truncdfsf2 (a)
X     double a;
X{
X  union flt_or_int intify;
X  return INTIFY (a);
X}
X#endif
X
X#ifdef L_extendsfdf2
Xdouble
X_extendsfdf2 (a)
X     union flt_or_int a;
X{
X  union flt_or_int intify;
X  return a.f;
X}
X#endif
SHAR_EOF
true || echo 'restore of gnulib_.c failed'
fi
# ============= ldexp.c ==============
if test -f 'ldexp.c' -a X"$1" != X"-c"; then
	echo 'x - skipping ldexp.c (File already exists)'
else
echo 'x - extracting ldexp.c (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'ldexp.c' &&
X/*
X * double ldexp(value, exp);
X * double value;
X * unsigned int exp;
X *
X * returns value * 2**exp
X *
X * ++jrb	bammi at dsrgsun.ces.cwru.edu
X */
X#include "flonum.h"
X
X#ifdef SHORTLIB /* in short lib, this file is compiled with 32 bit ints */
X#  ifdef __STDC__
X    double ldexp(double value, unsigned short ex)
X#  else
X    double ldexp(value, ex)
X    double value;
X    unsigned short ex;
X#  endif
X#else
X#  ifdef __STDC__
X    double ldexp(double value, unsigned int ex)
X#  else
X    double ldexp(value, ex)
X    double value;
X    unsigned int ex;
X#  endif
X#endif /* SHORTLIB */
X{
X	struct bitdouble *res = (struct bitdouble *) &value;
X
X	res->exp   += ex;
X	return value;
X}
SHAR_EOF
true || echo 'restore of ldexp.c failed'
fi
# ============= ldexp.s ==============
if test -f 'ldexp.s' -a X"$1" != X"-c"; then
	echo 'x - skipping ldexp.s (File already exists)'
else
echo 'x - extracting ldexp.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'ldexp.s' &&
X#NO_APP
Xgcc_compiled.:
X.text
X	.even
X.globl _ldexp
X_ldexp:
X	link a6,#-4
X	movel a6,a1
X	addqw #8,a1
X	movel a1,a6@(-4)
X	movel a6@(-4),d0
X	movel d0,a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d0
X	andl #2047,d0
X	addl a6@(16),d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L1
XL1:
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of ldexp.s failed'
fi
# ============= modf.c ==============
if test -f 'modf.c' -a X"$1" != X"-c"; then
	echo 'x - skipping modf.c (File already exists)'
else
echo 'x - extracting modf.c (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'modf.c' &&
X/*
X * double modf(value, iptr)
X * double value;
X * double *iptr;
X *
X * returns fractional part of value
X *	   in *iptr returns the integral part
X *	   such that (*iptr + fractional) == value
X *
X * ++jrb	bammi at dsrgsun.ces.cwru.edu
X *
X * special thanks to peter housel  housel at ecn.purdue.edu
X * this (improved) version is based on his X86 code
X *
X */
X#include "flonum.h"
X
X#define BIAS 1023
X#define B1   1022	/* BIAS - 1 */
X#define B2   1075	/* (BIAS - 1) + 53 == max bias for integ part    */
X#define B3   1011	/* (BIAS - 1) - 11 == the max bias for a frac. # */
X
Xstruct ldouble {
X    unsigned long hi, lo;
X};				/* another alias for double */
X
Xdouble modf(value, iptr)
Xdouble value, *iptr;
X{
X    struct bitdouble *bd = (struct bitdouble *)&value;
X    unsigned int expo = bd->exp;
X#ifdef __STDC__
X    double norm( double, int, int, int );
X#else
X    extern double norm();
X#endif
X    
X    if(expo <= B1)	/* all frac, no integ */
X    {
X	*iptr = 0.0;
X	return value;
X    }
X    if(expo >= B2)	/* all integ, no frac */
X    {
X	*iptr = value;
X	return 0.0;
X    }
X
X    /* both integ and frac */
X    {
X	register unsigned long i0, i1;	/* integral   part accumulator */
X	register unsigned long f0, f1;	/* fractional part accumulator */
X	unsigned int sign  = bd->sign;
X	
X	i0 = bd->mant1 | 0x00100000L;
X	i1 = bd->mant2;
X	f0 = f1 = 0L;
X
X	do
X	{
X	    /* shift R integ/frac, with bit falling into frac acc */
X	    asm volatile(" lsrl  #1,%1;
X 			   roxrl #1,%0"
X		: "=d" (i1), "=d" (i0)
X		: "0"  (i1), "1"  (i0) );
X
X	    asm volatile(" roxrl #1,%1;
X 			   roxrl #1,%0"
X		: "=d" (f1), "=d" (f0)
X		: "0"  (f1), "1"  (f0) );
X
X	} while(++expo < B2);
X
X	/* stuff the results, and normalize */
X	((struct ldouble *)&value)->hi = i0;	/* integral part */
X	((struct ldouble *)&value)->lo = i1;
X	*iptr = norm(value, expo, sign, 0);
X
X	/* dont call norm if frac is all zero, as B3 exp will screw it up */
X	if((f0 == 0) && (f1 == 0))
X	    return 0.0;
X
X	((struct ldouble *)&value)->hi = f0;	/* fractional part */
X	((struct ldouble *)&value)->lo = f1;
X	return norm(value, B3, sign, 0);
X    }
X}
X
X
X#ifdef TEST
X#include <stdio.h>
X#ifdef __MSHORT__
X#error "please run this test in 32 bit int mode"
X#endif
X
X#define NTEST	100000L
X#define MAXRAND 0x7fffffffL	/* assuming 32 bit ints */
X#define ABS(X)	( (X < 0) ? (-X) : X )
Xextern long rand();
X
Xint main()
X{
X    double frac, integ, e, expected, result, max_abs_err;
X    register long i;
X    register long errs = 0;
X    register int s;
X
X    max_abs_err = 0.0;
X    for(i = 0; i < NTEST; i++)
X    {
X	expected = ((double)(s = rand()) + (double)rand())/(double)(MAXRAND);
X	if(s > (MAXRAND >> 1)) expected = -expected;
X	frac = modf(expected, &integ);
X	if(ABS(frac) >= 1.0)
X	{
X	    printf("|frac| >= 1, %.6e:  integ %.6e  frac %.6e\n",
X		   expected, integ, frac);
X	    errs++;
X	    continue;
X	}
X	if( (integ != 0) && (ABS(integ) < 1.0) )
X	{
X	    printf("|integ| < 1, %.6e:  integ %.6e  frac %.6e\n",
X		   expected, integ, frac);
X	    errs++;
X	    continue;
X	}
X
X	result = integ + frac;
X	e = (expected == 0.0) ? result : (result - expected)/expected;
X	if(e < 0) e = (-e);
X	if(e > 1.0e-6)
X	{
X	    printf("%.4e: I %.4e F %.4e R %.4e E %.8e\n",
X		   expected, integ, frac, result, e);
X	    errs++;
X	}
X	if (e > max_abs_err) max_abs_err = e;
X    }
X    
X    printf("%ld Error(s), Max abs err %.14e\n", errs, max_abs_err);
X    return errs;
X}
X#endif /* TEST */
SHAR_EOF
true || echo 'restore of modf.c failed'
fi
# ============= modf.s ==============
if test -f 'modf.s' -a X"$1" != X"-c"; then
	echo 'x - skipping modf.s (File already exists)'
else
echo 'x - extracting modf.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'modf.s' &&
X#NO_APP
Xgcc_compiled.:
X.text
X	.even
XLC0:
X	.double 0r0.00000000000000000000e+00
X	.even
X.globl _modf
X_modf:
X	link a6,#-12
X	moveml #0x3f00,sp at -
X	movel a6,d6
X	addql #8,d6
X	movel d6,a6@(-4)
X	movel a6@(-4),a0
X	movew a0@,d0
X	lsrw #4,d0
X	moveq #0,d1
X	movew d0,d1
X	movel d1,d6
X	andl #2047,d6
X	movel d6,a6@(-8)
X	cmpl #1022,a6@(-8)
X	jhi L2
X	movel a6@(16),a0
X	movel LC0,d6
X	movel LC0+4,d7
X	movel d6,a0@
X	movel d7,a0@(4)
X	movel a6@(8),d0
X	movel a6@(12),d1
X	jra L1
XL2:
X	cmpl #1074,a6@(-8)
X	jls L3
X	movel a6@(16),a0
X	movel a6@(8),d6
X	movel a6@(12),d7
X	movel d6,a0@
X	movel d7,a0@(4)
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L1
XL3:
X	movel a6@(-4),a0
X	moveb a0@,d0
X	lsrb #7,d0
X	moveq #0,d1
X	moveb d0,d1
X	movel d1,a6@(-12)
X	movel a6@(-4),a0
X	movel a0@,d2
X	andl #1048575,d2
X	bset #20,d2
X	movel a6@(-4),a0
X	movel a0@(4),d3
X	moveq #0,d5
X	moveq #0,d4
XL4:
X#APP
X	 lsrl  #1,d2;
X 			   roxrl #1,d3
X	 roxrl #1,d4;
X 			   roxrl #1,d5
X#NO_APP
XL6:
X	addql #1,a6@(-8)
X	cmpl #1074,a6@(-8)
X	jhi L5
X	jra L4
XL5:
X	movel d2,a6@(8)
X	movel d3,a6@(12)
X	clrl sp at -
X	movel a6@(-12),sp at -
X	movel a6@(-8),sp at -
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr _norm
X	movel d0,d0
X	movel d1,d1
X	movel a6@(16),a0
X	movel d0,a0@
X	movel d1,a0@(4)
X	addw #20,sp
X	tstl d4
X	jne L7
X	tstl d5
X	jne L7
X	movel LC0,d0
X	movel LC0+4,d1
X	jra L1
XL7:
X	movel d4,a6@(8)
X	movel d5,a6@(12)
X	clrl sp at -
X	movel a6@(-12),sp at -
X	pea 1011:w
X	movel a6@(12),sp at -
X	movel a6@(8),sp at -
X	jbsr _norm
X	jra L1
XL1:
X	moveml a6@(-36),#0xfc
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of modf.s failed'
fi
# ============= modsi3.s ==============
if test -f 'modsi3.s' -a X"$1" != X"-c"; then
	echo 'x - skipping modsi3.s (File already exists)'
else
echo 'x - extracting modsi3.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'modsi3.s' &&
X	.text			| cjpurcell 30Jul89
X	.even			|
X	.globl __modsi3		|
X	.globl ___modsi3	|
X__modsi3:			|
X___modsi3:			|
X	link a6,#-4		|
X	movl d2,sp at -		|
X	movel a6@(12),sp at -	|
X	movel a6@(8),sp at -	|
X	jbsr ___divsi3		|
X	movel d0,d0		|
X	movel a6@(12),sp at -	|
X	movel d0,sp at -		|
X	jbsr ___mulsi3		|
X	movel d0,d2		|
X	movel a6@(8),d1		|
X	subl d2,d1		|
X	movel d1,a6@(-4)	|
X	movel a6@(-4),d0	|
X	movl a6@(-8),d2		|
X	unlk a6			|
X	rts			|
SHAR_EOF
true || echo 'restore of modsi3.s failed'
fi
# ============= mulsi3.s ==============
if test -f 'mulsi3.s' -a X"$1" != X"-c"; then
	echo 'x - skipping mulsi3.s (File already exists)'
else
echo 'x - extracting mulsi3.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'mulsi3.s' &&
X	.text			| cjpurcell 30Jul89
X	.even			|
X	.globl __mulsi3		|
X	.globl ___mulsi3	|
X__mulsi3:			|
X___mulsi3:			|
X	link a6,#-2		| one word of local
X	clrw a6@(-2)		| zap the local; it's our neg flg
X				|
X	movel a6@(12),d0	| get b
X	bpl muls1		| pos, ok
X	negl d0			| negate it
X	addqw #1,a6@(-2)	| and inc the flag
Xmuls1:				|
X	movel d0,sp at -		| push adjusted b
X	movel a6@(8),d0		| get a
X	bpl muls2		| pos, ok
X	negl d0			| negate it
X	subqw #1,a6@(-2)	| and dec the flag
Xmuls2:				|
X	movel d0,sp at -		| push adjusted a
X	jbsr ___umulsi3		| do an unsigned mult
X	addql #8,sp		| flush args
X	tstw a6@(-2)		| flag set?
X	beq muls3		| nope, go ahead and return
X	negl d0			| negate the result
Xmuls3:				|
X				|
X	unlk a6			| unlink the frame
X	rts			| done!
SHAR_EOF
true || echo 'restore of mulsi3.s failed'
fi
# ============= norm.c ==============
if test -f 'norm.c' -a X"$1" != X"-c"; then
	echo 'x - skipping norm.c (File already exists)'
else
echo 'x - extracting norm.c (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'norm.c' &&
X/*
X *  Ieee double normalization function
X *
X *  double norm( double d, int exp, int sign, int rbits )
X *
X *  inputs:
X *	m	a 63 bit mantissa (passed as 8 bytes in a double's storage)
X *	exp	BIASED exponent
X *	sign	sign of the result (1 == -ve  0 == +ve)
X *	rbits	8 bits of rounding info
X *	  on input radix point is at extreme right (exp should reflect this)
X * output:
X *	the normalized double in ieee double format
X *
X * algorithm:
X *	see posting by p. housel housel at ecn.purdue.edu (thanks peter)
X *	    and  knuth v2 section 4.2.1	algorithm N
X *
X *	++jrb	bammi at dsrgsun.ces.cwru.edu
X * bugs:
X *	strictly mc68X coding
X *	(can be coded using gnu c `long long' type very easily)
X */
X#include "flonum.h"
X
Xstruct ldouble {
X    unsigned long hi, lo;
X};		/* yet another alias for a double */
X
Xdouble norm( double m, int exp, int sign, int rbits )
X{
X    /* we'll use constraints later for values to end up in D reggies */
X    register unsigned long d0, d1;	/*  64 bit accumulator     */
X    register char rounding; /* rounding reg-signed char for convenience */
X    double result;			/* the normalized result    */
X    struct bitdouble *r = (struct bitdouble *)&result; /* alias    */
X
X    /* init */
X    d0 = ((struct ldouble *)&m)->hi;
X    d1 = ((struct ldouble *)&m)->lo;
X    rounding = rbits;
X    
X    if( (d0 == 0) && (d1 == 0) && (rounding == 0) )
X	goto stuff_result; /* nothing to do, but stuff bits into result */
X
X    /* keep shifting R & accumulating rounding bits until we have 53 bits */
X    /* lsb of rounding register is sticky for 1 bits shifted out          */
X
X    asm volatile("ReNormalize:");	/* ok, i admit it :-) */
X    while( (d0 & 0xffe00000L) != 0)
X    {
X#if 0 /* only 5 operands allowed in asm() - why?? */
X	asm volatile (" lsrl	#1,%2 ; 
X		        roxrl	#1,%1 ;
X 			roxrb	#1,%0 ;
X 			jcc	NoCarry;
X 			orb	#1,%0 ;
X 			NoCarry:      "
X        : "=d" (rounding), "=d" (d1), "=d" (d0)	   /* outputs */
X        : "0"  (rounding), "1"  (d1), "2"  (d0) ); /* inputs  */
X#else
X	asm volatile (" lsrl	#1,%1 ; 
X		        roxrl	#1,%0 "
X        : "=d" (d1), "=d" (d0)	   /* outputs */
X        : "0"  (d1), "1"  (d0) ); /* inputs  */
X        asm volatile ("	roxrb	#1,%0 ;
X 			jcc	NoCarry;
X 			orb	#1,%0 ;
X 			NoCarry:      "
X        : "=d" (rounding)	   /* outputs */
X        : "0"  (rounding) ); /* inputs  */
X#endif
X	/* increment exponent to reflect the shift */
X	exp++;
X    }
X
X    /* shift L until there is a 1 in the hidden bit pos, shifting
X       the rounding reggie into the lsb */
X    while( (d0 & 0x00100000L) == 0)
X    {
X#if 0 /* same here */
X	asm volatile ("	lslb	#1,%2 ;
X 			roxll	#1,%1 ;
X 			roxll	#1,%0 "
X        : "=d" (d0), "=d" (d1), "=d" (rounding)	   /* outputs */
X        : "0"  (d0), "1"  (d1), "2"  (rounding) ); /* inputs  */
X#else
X	asm volatile ("	lslb	#1,%1 ;
X 			roxll	#1,%0 "
X        : "=d" (d1), "=d" (rounding)	   /* outputs */
X        : "0"  (d1), "1"  (rounding) );	   /* inputs  */
X
X	asm volatile ("	roxll	#1,%0 "
X        : "=d" (d0)	   /* outputs */
X        : "0"  (d0)); 	   /* inputs  */
X#endif
X	/* decrement exponent to reflect the shift */
X	--exp;
X    }
X
X    /* check rounding register */
X    if (rounding < 0)
X    {	/* not round down */
X	if( (((unsigned char)rounding) & 0x80) == 0)
X	{	/* tie: round to even */
X	    d1 &= 0xfffffffeL;	/* set lsb to 0 */
X	}
X	else
X	{	/* round up */
X	    rounding = 0;
X	asm volatile ("
X		addql  #1,%1 ;
X		jcc    NoCarry1 ;
X		addql  #1,%0 ;
X    		bra   ReNormalize; |just in case of overflow into hidden bit\n
X                NoCarry1:           "
X        : "=d" (d0), "=d" (d1)	   /* outputs */
X        : "0"  (d0), "1"  (d1) );  /* inputs  */
X	}
X    }
X    
X    /* exponent underflow ?? */
X    if(exp <= 0)
X    {
X	printf("Underflow %lx %lx %d\n", d0, d1, exp);
X	d0 = 0;
X	d1 = 0;
X	sign = 0;
X	exp = 0;
X	goto stuff_result;
X    }
X    /* exp overflow ? */
X    if(exp >= 2047)
X    {
X	/* cause overflow trap, but how ?? */
X	printf("Overflow %lx %lx %d\n", d0, d1, exp);
X    }
X    
X    stuff_result: /* stuff bit in result and ret */
X    r->sign = sign;
X    r->exp  = exp;
X    r->mant1 = d0;
X    r->mant2 = d1;
X    
X    return result;
X}
X
X#ifdef TEST
X#include <stdio.h>
X
Xmain()
X{
X    register unsigned long d0, d1;
X    double r1, r2, r3;
X    struct bitdouble *pr1 = (struct bitdouble *)&r1;
X    struct bitdouble *pr3 = (struct bitdouble *)&r3;
X    unsigned long *l = (unsigned long *)&r2;
X    
X    r2 = r1 = 3.14159265358979323846265e23;
X
X    *l &= 0x000FFFFFL;	/* wallup sign and exponent  in r2 */
X    *l |= 0x00100000L;  /* add hidden bit	    	   */
X    
X    /* try the straight case first */
X    r3 = norm(r2, (unsigned int)pr1->exp, (unsigned int)pr1->sign, 0);
X    
X    printf("%30.25e %05lx %08lx %4d %1d\n", r1, pr1->mant1, pr1->mant2,
X	   pr1->exp, pr1->sign);
X    printf("%30.25e %05lx %08lx %4d %1d\n\n", r3, pr3->mant1, pr3->mant2,
X	   pr3->exp, pr3->sign);
X
X    /* shift L and try */
X    d0 = l[0];
X    d1 = l[1];
X    asm volatile ("	lsll	#1,%1 ;
X			roxll	#1,%0 "
X        : "=d" (d0), "=d" (d1)	   /* outputs */
X        : "0"  (d0), "1"  (d1) );  /* inputs  */
X    l[0] = d0;
X    l[1] = d1;
X    
X    r3 = norm(r2, (unsigned int)pr1->exp - 1, (unsigned int)pr1->sign, 0);
X    
X    printf("%30.25e %05lx %08lx %4d %1d\n", r1, pr1->mant1, pr1->mant2,
X	   pr1->exp, pr1->sign);
X    printf("%30.25e %05lx %08lx %4d %1d\n\n", r3, pr3->mant1, pr3->mant2,
X	   pr3->exp, pr3->sign);
X
X    /* how about a shift R */
X    r2 =r1;
X
X    *l &= 0x000FFFFFL;	/* wallup sign and exponent  in r2 */
X    *l |= 0x00100000L;  /* add hidden bit	    	   */
X
X    d0 = l[0];
X    d1 = l[1];
X    asm volatile ("	lsrl	#1,%0 ;
X			roxrl	#1,%1 "
X        : "=d" (d0), "=d" (d1)	   /* outputs */
X        : "0"  (d0), "1"  (d1) );  /* inputs  */
X    l[0] = d0;
X    l[1] = d1;
X    
X    r3 = norm(r2, (unsigned int)pr1->exp + 1, (unsigned int)pr1->sign, 0);
X    
X    printf("%30.25e %05lx %08lx %4d %1d\n", r1, pr1->mant1, pr1->mant2,
X	   pr1->exp, pr1->sign);
X    printf("%30.25e %05lx %08lx %4d %1d\n\n", r3, pr3->mant1, pr3->mant2,
X	   pr3->exp, pr3->sign);
X}
X#endif
SHAR_EOF
true || echo 'restore of norm.c failed'
fi
# ============= norm.s ==============
if test -f 'norm.s' -a X"$1" != X"-c"; then
	echo 'x - skipping norm.s (File already exists)'
else
echo 'x - extracting norm.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'norm.s' &&
X#NO_APP
Xgcc_compiled.:
X.text
XLC0:
X	.ascii "Underflow %lx %lx %d\12\0"
XLC1:
X	.ascii "Overflow %lx %lx %d\12\0"
X	.even
X.globl _norm
X_norm:
X	link a6,#-12
X	moveml #0x3c00,sp at -
X	movel a6,d5
X	subql #8,d5
X	movel d5,a6@(-12)
X	movel a6@(8),d2
X	movel a6@(12),d3
X	moveb a6@(27),d4
X	tstl d2
X	jne L2
X	tstl d3
X	jne L2
X	tstb d4
X	jne L2
X	jra L3
XL2:
X#APP
X	ReNormalize:
X#NO_APP
XL4:
X	movel d2,d0
X	andl #-2097152,d0
X	tstl d0
X	jeq L5
X#APP
X	 lsrl	#1,d2 ; 
X		        roxrl	#1,d3 
X		roxrb	#1,d4 ;
X 			jcc	NoCarry;
X 			orb	#1,d4 ;
X 			NoCarry:      
X#NO_APP
X	addql #1,a6@(16)
X	jra L4
XL5:
X	nop
XL6:
X	movel d2,d0
X	andl #1048576,d0
X	tstl d0
X	jne L7
X#APP
X		lslb	#1,d4 ;
X 			roxll	#1,d3 
X		roxll	#1,d2 
X#NO_APP
X	subql #1,a6@(16)
X	jra L6
XL7:
X	tstb d4
X	jge L8
X	moveq #0,d0
X	moveb d4,d0
X	andl #128,d0
X	tstl d0
X	jne L9
X	moveq #-2,d5
X	andl d5,d3
X	jra L10
XL9:
X	clrb d4
X#APP
X	
X		addql  #1,d3 ;
X		jcc    NoCarry1 ;
X		addql  #1,d2 ;
X    		bra   ReNormalize; |just in case of overflow into hidden bit
X
X                NoCarry1:           
X#NO_APP
XL10:
XL8:
X	tstl a6@(16)
X	jgt L11
X	movel a6@(16),sp at -
X	movel d3,sp at -
X	movel d2,sp at -
X	pea LC0
X	jbsr _printf
X	moveq #0,d2
X	moveq #0,d3
X	clrl a6@(20)
X	clrl a6@(16)
X	addw #16,sp
X	jra L3
XL11:
X	cmpl #2046,a6@(16)
X	jle L12
X	movel a6@(16),sp at -
X	movel d3,sp at -
X	movel d2,sp at -
X	pea LC1
X	jbsr _printf
X	addw #16,sp
XL12:
X	nop
XL3:
X	movel a6@(-12),a0
X	moveb a6@(23),d0
X	andb #1,d0
X	lslb #7,d0
X	andb #127,a0@
X	orb d0,a0@
X	movel a6@(-12),a0
X	movew a6@(18),d0
X	andw #2047,d0
X	lslw #4,d0
X	andw #32783,a0@
X	orw d0,a0@
X	movel a6@(-12),a0
X	movel d2,d0
X	andl #1048575,d0
X	andl #-1048576,a0@
X	orl d0,a0@
X	movel a6@(-12),a0
X	movel d3,a0@(4)
X	movel a6@(-8),d0
X	movel a6@(-4),d1
X	jra L1
XL1:
X	moveml a6@(-28),#0x3c
X	unlk a6
X	rts
SHAR_EOF
true || echo 'restore of norm.s failed'
fi
# ============= sflonum.s ==============
if test -f 'sflonum.s' -a X"$1" != X"-c"; then
	echo 'x - skipping sflonum.s (File already exists)'
else
echo 'x - extracting sflonum.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'sflonum.s' &&
X#
X#	Floating point support code.  What a crock!
X#
X#	A float looks like:
X#
X#	|S|E.x.p ... |M.a.n.t.i.s.s.a ... |
X#	+-+----------+--------------------+
X#
X#	where s is the sign bit, Exp is 8 bits of exponent, interpreted
X#	as E + 126, and Mantissa is 23 bits of fraction, with a
X#	hidden bit.  The point is to the left of the hidden bit.
X#	Doubles have another word of mantissa following.
X#
X#	All these routines have calling sequences like c routines,
X#	ie args on stack in backwards order, return values in d0
X#
X
X# union double_di { double d; int i[2]; };
X# union flt_or_int { int i; float f; };
X#
X## #ifdef L_divdf3
X## double
X## _divdf3 (a, b)
X##      double a, b;
X## {
X##   return a / b;
X## }
X## #endif
X#.text
X#	.even
X#.globl ___divdf3
X#__divdf3:
X#	movel	sp@(12),sp at -
X#	movel	sp@(8),sp at -
X#	jsr	__divsf3
X#	addql	#8,sp
X#	clrl	d1		| kludge!!!
X#	rts			| sigh
X## 
X## #ifdef L_muldf3
X## double
X## _muldf3 (a, b)
X##      double a, b;
X## {
X##   return a * b;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __muldf3
X#__muldf3:
X#	movel	sp@(12),sp at -
X#	movel	sp@(8),sp at -
X#	jsr	__mulsf3
X#	addql	#8,sp
X#	clrl	d1		| kludge!!!
X#	rts			| sigh
X## 
X## #ifdef L_negdf2
X## double
X## _negdf2 (a)
X##      double a;
X## {
X##   return -a;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __negdf2
X#__negdf2:
X#	movel	sp@(8),d1		| get a lo
X#	movel	sp@(4),d0		| get a hi
X#	beq	negdf2_z		| zero, leave it
X#	eorl	#0x80000000,d0		| twiddle sign
X#negdf2_z:
X#	rts
X## 
X## #ifdef L_adddf3
X## double
X## _adddf3 (a, b)
X##      double a, b;
X## {
X##   return a + b;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __adddf3
X#__adddf3:
X#	movel	sp@(12),sp at -
X#	movel	sp@(8),sp at -
X#	jsr	__addsf3
X#	addql	#8,sp
X#	clrl	d1		| kludge!!!
X#	rts			| sigh
X## 
X## #ifdef L_subdf3
X## double
X## _subdf3 (a, b)
X##      double a, b;
X## {
X##   return a - b;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __subdf3
X#__subdf3:
X#	movel	sp@(12),sp at -
X#	movel	sp@(8),sp at -
X#	jsr	__subsf3
X#	addql	#8,sp
X#	clrl	d1		| kludge!!!
X#	rts			| sigh
X## 
X## #ifdef L_cmpdf2
X## int
X## _cmpdf2 (a, b)
X##      double a, b;
X## {
X##   if (a > b)
X##     return 1;
X##   else if (a < b)
X##     return -1;
X##   return 0;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __cmpdf2
X#__cmpdf2:
X#	movel	sp@(4),d0	| get b hi
X#	movel	sp@(12),d1	| get a hi
X#|
X#| crockery.  If both neg and not equal, this algorithm lose.  find a better one!
X#|
X#	bpl	cmpdf2_p
X#	tstl	d0
X#	bpl	cmpdf2_p
X#	cmpl	d1,d0
X#	bgt	cmpdf2_m
X#	blt	cmpdf2_1
X#	beq	cmpdf2_z
X#cmpdf2_p:
X#	cmpl	d1,d0		| get a hi
X#	beq	cmpdf2_z	| if eq, return 0
X#	bgt	cmpdf2_1	| if greater, return 1
X#cmpdf2_m:
X#	movel	#-1,d0		| else return -1
X#	rts
X#cmpdf2_z:
X#	clrl	d0
X#	rts
X#cmpdf2_1:
X#	movel	#1,d0
X#	rts			| sigh
X## 
X## #ifdef L_fixunsdfsi
X## _fixunsdfsi (a)
X##      double a;
X## {
X##   return (unsigned int) a;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __fixunsdfsi
X#__fixunsdfsi:
X#	clrl d0
X#	clrl d1
X#	rts			| sigh
X## 
X## #ifdef L_fixunsdfdi
X## double
X## _fixunsdfdi (a)
X##      double a;
X## {
X##   union double_di u;
X##   u.i[LOW] = (unsigned int) a;
X##   u.i[HIGH] = 0;
X##   return u.d;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __fixunsdfdi
X#__fixunsdfdi:
X#	clrl d0
X#	clrl d1
X#	rts			| sigh
X## 
X## #ifdef L_fixdfsi
X## _fixdfsi (a)
X##      double a;
X## {
X##   return (int) a;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __fixdfsi
X#__fixdfsi:
X#	link	a6,#0
X#	movel	d2,sp at -		| save reg
X#	clrl	d2		| sign flag
X#	movel	a6@(8),d0	| get the float
X#	beq	fixdfsi_ret
X#	bpl	fixdfsi_1
X#	addql	#1,d2
X#fixdfsi_1:
X#	movel	d0,d1		| snag the exp
X#	lsrl	#7,d1
X#	lsrl	#8,d1
X#	lsrl	#8,d1
X#	andl	#0xFF,d1
X#	subl	#126,d1
X#	andl	#0x7FFFFF,d0	| zap cruft
X#	orl	#0x800000,d0	| put back hidden bit
X#|
X#| at this point the mantissa looks like 2^24 * integer value.
X#| if Exp is 24, were done.  If its less, we shift right,
X#| else left
X#|
X#fixdfsi_2:
X#	cmpl	#24,d1
X#	beq	fixdfsi_4	| were done
X#	bmi	fixdfsi_3	| less, shift right
X#	lsll	#1,d0		| greater, shift it left one
X#	subql	#1,d1		| and dec exp
X#	bra	fixdfsi_2
X#fixdfsi_3:
X#	lsrl	#1,d0		| shift right one
X#	addql	#1,d1		| and inc exp
X#	bra	fixdfsi_2
X#fixdfsi_4:
X#	tstl	d2		| negative?
X#	beq	fixdfsi_ret
X#	negl	d0
X#fixdfsi_ret:
X#	movel	sp at +,d2		| get d2 back
X#	unlk	a6
X#	rts
X## 
X## #ifdef L_fixdfdi
X## double
X## _fixdfdi (a)
X##      double a;
X## {
X##   union double_di u;
X##   u.i[LOW] = (int) a;
X##   u.i[HIGH] = (int) a < 0 ? -1 : 0;
X##   return u.d;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __fixdfdi
X#__fixdfdi:
X#	clrl d0
X#	clrl d1
X#	rts			| sigh
X## 
X## #ifdef L_floatsidf
X## double
X## _floatsidf (a)
X##      int a;
X## {
X##   return (double) a;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __floatsidf
X#__floatsidf:
X#	link	a6,#0		| set up frame
X#	movel	d2,sp at -		| save d2; sign flag
X#	clrl	d2		| not negative yet
X#	movel	#24,d1		| exponent so far
X#	movel	a6@(8),d0	| get the int
X#	beq	floatsidf_ret	| zero?
X#	bpl	floatsidf_1	| pos, its ok
X#	negl	d0		| negate it
X#	addql	#1,d2		| bump sign
X#floatsidf_1:
X#|
X#| normalize right if necessary
X#|
X#	cmpl	#0xFFFFFF,d0	| too big?
X#	ble	floatsidf_2	| nope, see if need to slide left
X#	addql	#1,d1		| bump exp
X#	lsrl	#1,d0		| and slide mantissa right one
X#	bra	floatsidf_1
X#floatsidf_2:
X#	btst	#23,d0		| got a bit up here yet?
X#	bne	floatsidf_3	| nope, go left
X#	subql	#1,d1		| dec exp
X#	lsll	#1,d0		| and slide mantissa left one
X#	bra	floatsidf_2
X#floatsidf_3:
X#|
X#| now put it all together
X#|
X#	andl	#0x7FFFFF,d0	| zap hidden bit
X#	addl	#126,d1		| offset exp
X#	andl	#0xFF,d1	| trim it
X#	lsll	#8,d1		| shift up
X#	lsll	#8,d1
X#	lsll	#7,d1
X#	orl	d1,d0		| stuff it in
X#	tstl	d2		| negative?
X#	beq	floatsidf_ret
X#	orl	#0x80000000,d0
X#floatsidf_ret:
X#	movel	sp at +,d2
X#	clrl	d1		| ???
X#	unlk	a6
X#	rts			| sigh
X## 
X## #ifdef L_floatdidf
X## double
X## _floatdidf (u)
X##      union double_di u;
X## {
X##   register double hi
X##     = ((double) u.i[HIGH]) * (double) 0x10000 * (double) 0x10000;
X##   register double low = (unsigned int) u.i[LOW];
X##   return hi + low;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __floatdidf
X#__floatdidf:
X#	clrl d0
X#	clrl d1
X#	rts			| sigh
X## 
X# #define INTIFY(FLOATVAL)  (intify.f = (FLOATVAL), intify.i)
X# 
X# #ifdef L_addsf3
X# int
X# _addsf3 (a, b)
X#      union flt_or_int a, b;
X# {
X#   union flt_or_int intify;
X#   return INTIFY (a.f + b.f);
X# }
X# #endif
X
X.text
X	.even
X.globl ___addsf3
X.globl __addsf3
X___addsf3:
X__addsf3:
X	link	a6,#0		| dont need any locals
X	moveml	#0x3F00,sp at -	| save all data registers
X	movel	a6@(8),d0	| get a
X	beq	addsf_ret_b	|  zero .. just return b
X	movel	#23,d6		| shift count
X	movel	d0,d2		| get the exponent
X	lsrl	d6,d2		| and shift right
X	andl	#0xFF,d2	| no sign bit
X	subl	#126,d2		| offset the exponent
X	movel	a6@(12),d1	| get b
X	beq	addsf_ret_a
X	movel	d1,d3		| get the exponent for b
X	lsrl	d6,d3		| and shift right, with implicit extend
X	andl	#0xFF,d3	| make sure we didnt get a sign bit
X	subl	#126,d3		| off set this one too
X
X	andl	#0x7FFFFF,d0	| mask a for mantissa
X	orl	#0x800000,d0	| and put in hidden bit
X	tstl	a6@(8)		| test the original value
X	bpl	addsf_1		| pos, ok
X	negl	d0		| neg, negate the mantissa
Xaddsf_1:
X	andl	#0x7FFFFF,d1	| mask b for mantissa
X	orl	#0x800000,d1	| ditto
X	tstl	a6@(12)		| test ...
X	bpl	addsf_2
X	negl	d1		| negate this one
Xaddsf_2:
X	cmpl	d2,d3		| compare Ea to Eb
X	blt	addsf_3		| Ea > Eb
X
X	movel	d3,d5		| get Eb
X	subl	d2,d5		| subtract Ea
X	asrl	d5,d0		|  yielding count to shift Ma right
X	movel	d3,d5		| use this as resultant exponent
X	bra	addsf_4		| and go rejoin common part
Xaddsf_3:
X	movel	d2,d5		| get Ea
X	subl	d3,d5		| subtract Eb
X	asrl	d5,d1		|  yielding count to shift Mb right
X	movel	d2,d5		| use this as resultant exponent
X
Xaddsf_4:
X	clrl	d7		| zap sign flag
X	addl	d1,d0		| add Mb to Ma
X
X
X	beq	addsf_z		| zero? ok, go return zero
X	bpl	addsf_5		| positive? ok, go scale it
X	negl	d0		| negate Mr
X	movel	#1,d7		| remember sign
Xaddsf_5:
X	btst	#24,d0		| carry?
X	beq	addsf_6		| nope, its ok as is
X	asrl	#1,d0		| shift right one
X	addql	#1,d5		| inc exp
X
X| zzz check for overflow in here someplace
X
Xaddsf_6:
X	btst	#23,d0		| got a bit in the right place yet?
X	bne	addsf_7		| yes, were done
X	lsll	#1,d0		| shift left one
X	subql	#1,d5		| dec exponent
X	bra	addsf_6
Xaddsf_7:
X	andl	#0x7FFFFF,d0	| zap out hidden bit
X	addl	#126,d5		| add offset to exp
X	andl	#0xFF,d5	| zap to 8 bits
X	movel	#23,d6		| shift count
X	lsll	d6,d5		| shift the exp up
X	orl	d5,d0		| stick the exp in
X	tstl	d7		| negative?
X	beq	addsf_ret_a
X	orl	#0x80000000,d0	| yup, negate it
X	bra	addsf_ret_a
Xaddsf_z:
X	clrl	d0
X	bra	addsf_ret_a
Xaddsf_ret_b:
X	movel	a6@(12),d0
Xaddsf_ret_a:
X	moveml	sp at +,#0x00FC	| snarf back all regs
X	unlk	a6
X	rts			| sigh
X# 
X# #ifdef L_negsf2
X# int
X# _negsf2 (a)
X#      union flt_or_int a;
X# {
X#   union flt_or_int intify;
X#   return INTIFY (-a.f);
X# }
X# #endif
X
X.text
X	.even
X.globl ___negsf2
X.globl __negsf2
X___negsf2:
X__negsf2:
X	movel	sp@(4),d0
X	beq	negsf2_z
X	eorl	#0x80000000,d0
Xnegsf2_z:
X	rts			| sigh
X# 
X# #ifdef L_subsf3
X# int
X# _subsf3 (a, b)
X#      union flt_or_int a, b;
X# {
X#   union flt_or_int intify;
X#   return INTIFY (a.f - b.f);
X# }
X# #endif
X
X.text
X	.even
X.globl ___subsf3
X.globl __subsf3
X___subsf3:
X__subsf3:
X	tstl	sp@(8)		| kludge.  just negate b and add
X	beq	subsf_bz	| zero.  dont bother
X	eorl	#0x80000000,sp@(8)	| negate it
X	jmp	___addsf3
Xsubsf_bz:
X	movel	sp@(4),d0
X	rts
X
X# 
X# #ifdef L_cmpsf2
X# int
X# _cmpsf2 (a, b)
X#      union flt_or_int a, b;
X# {
X#   union flt_or_int intify;
X#   if (a.f > b.f)
X#     return 1;
X#   else if (a.f < b.f)
X#     return -1;
X#   return 0;
X# }
X# #endif
X
X.text
X	.even
X.globl ___cmpsf2
X.globl __cmpsf2
X___cmpsf2:
X__cmpsf2:
X	movel	sp@(4),d0
X	movel	sp@(12),d1	| get a hi
X|
X| crockery.  If both neg and not equal, this algorithm lose.  find a better one!
X|
X	bpl	cmpsf2_p
X	tstl	d0
X	bpl	cmpsf2_p
X	cmpl	d1,d0
X	bgt	cmpsf2_m
X	blt	cmpsf2_1
X	beq	cmpsf2_z
Xcmpsf2_p:
X	cmpl	d1,d0
X	beq	cmpsf2_z
X	bgt	cmpsf2_1
Xcmpsf2_m:
X	movel	#-1,d0
X	rts
Xcmpsf2_z:
X	clrl	d0
X	rts
Xcmpsf2_1:
X	movel	#1,d0
X	rts			| sigh
X# 
X# #ifdef L_mulsf3
X# int
X# _mulsf3 (a, b)
X#      union flt_or_int a, b;
X# {
X#   union flt_or_int intify;
X#   return INTIFY (a.f * b.f);
X# }
X# #endif
X
X.text
X	.even
X.globl ___mulsf3
X.globl __mulsf3
X___mulsf3:
X__mulsf3:
X|
X| multiply.  take the numbers apart.  shift each exponent down to
X| 16 bits.  unsigned multiply those.  shift that down to 24 bits.
X| exponent is Ea + Eb.
X|
X
X	link	a6,#-8		| 64 bit accum for mult
X	moveml	#0x3F00,sp at -	| save all data registers
X	movel	a6@(8),d0	| get a
X	beq	mulsf3_z
X	movel	a6@(12),d1	| get b
X	beq	mulsf3_z
X	movel	#23,d6		| shift count
X	movel	d0,d2		| get the exponent
X	lsrl	d6,d2		| and shift right
X	andl	#0xFF,d2
X	subl	#126,d2		| offset the exponent
X	movel	d1,d3		| get the exponent for b
X	lsrl	d6,d3		| and shift right
X	andl	#0xFF,d2
X	subl	#126,d3		| off set this one too
X
X	clrl	d7		| negative result flag
X	andl	#0x7FFFFF,d0	| mask a for mantissa
X	orl	#0x800000,d0	| and put in hidden bit
X	tstl	a6@(8)		| test the original value
X	bpl	mulsf3_1	| pos, ok
X	eorl	#1,d7		| remember negative
Xmulsf3_1:
X	andl	#0x7FFFFF,d1	| mask b for mantissa
X	orl	#0x800000,d1	| ditto
X	tstl	a6@(12)		| test ...
X	bpl	mulsf3_2
X	eorl	#1,d7
Xmulsf3_2:
X|	lsrl	#8,d1		| shift this one down
X|	lsrl	#8,d0		| this one too...
X|	mulu	d1,d0		| do the multiply
X
X|	beq	mulsf3_ret	| zero? ok, just return
X|	lsrl	#8,d0		| shift right again
X
X|
X| we have mantissas as follows:
X|
X|	|...ah...|...al...|	|...bh...|...bl...|
X|
X| product is composed as:
X|
X|			|....al * bl....|
X|		|....al * bh....|
X|		|....ah * bl....|
X|	|....ah * bh....|
X|
X| then take the 24 bit chunk thats 16 bits in.
X
X	movel	d0,d4
X	andl	#0xFFFF,d4	| al
X	movel	d1,d5
X	andl	#0xFFFF,d5	| bl
X	mulu	d5,d4		| thats al * bl
X	movel	d4,a6@(-4)	| into the accum
X	clrl	a6@(-8)		| zap the top part
X
X	movel	d0,d4
X	andl	#0xFFFF,d4	| al
X	movel	d1,d5
X	movel	#16,d6		| shift count
X	lsrl	d6,d5		| bh
X	mulu	d5,d4		| al * bh
X	addl	d4,a6@(-6)
X
X	movel	d0,d4
X	lsrl	d6,d4		| ah
X	movel	d1,d5
X	andl	#0xFFFF,d5	| bl
X	mulu	d5,d4		| ah * bl
X	addl	d4,a6@(-6)
X
X	movel	d0,d4
X	lsrl	d6,d4		| ah
X	movel	d1,d5
X	lsrl	d6,d5		| bh
X	mulu	d5,d4		| ah * bh
X	addl	d4,a6@(-8)
X
X	movel	a6@(-6),d0	| get the relevant part
X	lsrl	#8,d0		| and shift it down
X
Xmulsf3_norm:
X	btst	#23,d0		| normalized?
X	bne	mulsf3_ok
X	lsll	#1,d0
X	subql	#1,d2
X	bra	mulsf3_norm
X
Xmulsf3_ok:
X	andl	#0x7FFFFF,d0	| zap hidden bit
X	addl	d3,d2		| add Eb to Ea
X	addl	#126,d2		| fix offset
X	andl	#0xFF,d2	| whack to 8 bits
X	movel	#23,d6		| shift count
X	lsll	d6,d2		| shift up to right place
X	orl	d2,d0		| shove it in
X	tstl	d7		| sign neg?
X	beq	mulsf3_ret
X	orl	#0x80000000,d0	| set sign bit
X	bra	mulsf3_ret
Xmulsf3_z:
X	clrl	d0
Xmulsf3_ret:
X	moveml	sp at +,#0x00FC	| snarf back all regs
X	unlk	a6
X	rts			| sigh
X# 
X# #ifdef L_divsf3
X# int
X# _divsf3 (a, b)
X#      union flt_or_int a, b;
X# {
X#   union flt_or_int intify;
X#   return INTIFY (a.f / b.f);
X# }
X# #endif
X
X.text
X	.even
X.globl ___divsf3
X.globl __divsf3
X___divsf3:
X__divsf3:
X|
X| divide.  sort of like mult, exc we do shifts and subtracts to
X| do the division of the mantissa.  resultant exponent is Ea - Eb.
X|
X
X	link	a6,#0		| dont need any locals
X	moveml	#0x3F00,sp at -	| save all data registers
X	movel	a6@(8),d0	| get a
X	movel	a6@(12),d1	| get b
X	movel	#23,d6		| shift count
X	movel	d0,d2		| get the exponent
X	lsrl	d6,d2		| and shift right
X	andl	#0xFF,d2
X	subl	#127,d2		| offset the exponent
X	movel	d1,d3		| get the exponent for b
X	lsrl	d6,d3		| and shift right
X	andl	#0xFF,d3
X	subl	#127,d3		| off set this one too
X
X	clrl	d7		| negative result flag
X	andl	#0x7FFFFF,d0	| mask a for mantissa
X	orl	#0x800000,d0	| and put in hidden bit
X	tstl	a6@(8)		| test the original value
X	bpl	divsf3_1	| pos, ok
X	eorl	#1,d7		| remember negative
Xdivsf3_1:
X	andl	#0x7FFFFF,d1	| mask b for mantissa
X	orl	#0x800000,d1	| ditto
X	tstl	a6@(12)		| test ...
X	bpl	divsf3_2
X	eorl	#1,d7
Xdivsf3_2:
X|
X| for now, kludge.  shift Ma left and Mb right, then do an unsigned divide
X| and shift the result left.  Blech
X|
X
X|	lsrl	#8,d1		| shift this one down
X|	lsll	#7,d0		| this one up
X|	divu	d1,d0		| do the divide
X|	andl	#0xFFFF,d0	| and mask off cruft
X
X|	beq	divsf3_ret	| zero? ok, just return
X|	lsll	#8,d0		| shift left again
X
X| same sort of trick as long divide, exc its easier here, cause
X| the numbers (mantissas) are already bit-aligned.
X
X	clrl	d4		| accumulator
X	movel	#0x800000,d5	| bit
X	lsll	#7,d0		| buy a little extra accuracy...
X	lsll	#7,d1
Xdivsf3_2a:
X	cmpl	d1,d0		| compare dividend to divisor
X	bmi	divsf3_2b	| nope, no bit here
X	orl	d5,d4		| put in the bit
X	subl	d1,d0		| and subtract
Xdivsf3_2b:
X	lsrl	#1,d1		| slide divisor down
X	lsrl	#1,d5		| slide bit down
X	bne	divsf3_2a	| and go round again
X	movel	d4,d0		| leave the result here
X
Xdivsf3_3:
X	btst	#23,d0		| right place yet?
X	bne	divsf3_4
X	lsll	#1,d0
X	subql	#1,d2
X	bra	divsf3_3
Xdivsf3_4:
X	andl	#0x7FFFFF,d0	| zap hidden bit
X	subl	d3,d2		| sub Eb from Ea
X	addl	#127,d2		| fix offset
X	andl	#0xFF,d2	| whack to 8 bits
X	lsll	d6,d2		| shift up to right place
X	orl	d2,d0		| shove it in
X	tstl	d7		| sign neg?
X	beq	divsf3_ret
X	orl	#0x80000000,d0	| set sign bit
Xdivsf3_ret:
X	moveml	sp at +,#0x00FC	| snarf back all regs
X	unlk	a6
X
X	rts			| sigh
X## 
X## #ifdef L_truncdfsf2
X## int
X## _truncdfsf2 (a)
X##      double a;
X## {
X##   union flt_or_int intify;
X##   return INTIFY (a);
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __truncdfsf2
X#__truncdfsf2:
X#	movel	sp@(4),d0
X#	rts
X#
X## 
X## #ifdef L_extendsfdf2
X## double
X## _extendsfdf2 (a)
X##      union flt_or_int a;
X## {
X##   union flt_or_int intify;
X##   return a.f;
X## }
X## #endif
X#
X#.text
X#	.even
X#.globl __extendsfdf2
X#__extendsfdf2:
X#	movel	sp@(4),d0
X#	clrl	d1
X#	rts			| sigh
X#	.even
X#.text
X
SHAR_EOF
true || echo 'restore of sflonum.s failed'
fi
# ============= udivsi3.s ==============
if test -f 'udivsi3.s' -a X"$1" != X"-c"; then
	echo 'x - skipping udivsi3.s (File already exists)'
else
echo 'x - extracting udivsi3.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'udivsi3.s' &&
X	.text			| cjpurcell 30Jul89
X	.even			|
X	.globl __udivsi3	|
X	.globl ___udivsi3	|
X__udivsi3:			|
X___udivsi3:			|
X	link a6,#0		|
X				|
X	movel d2,sp at -		| check our bags
X	movel d3,sp at -		|
X	movel d4,sp at -		|
X				|
X	clrl d0			| d0 will be accum
X	movel a6@(8),d1		| d1 is a, dividend
X	movel a6@(12),d2	| d2 is b, divisor
X	beq udiv_zero		| Woops! divide by 0
X	movel #1,d3		| get a 1 into bit 0 of bit mask
X	movel #31,d4		| d4 is bit num for testing need to sub
Xudiv_sl:			|
X				|
X	btst d4,d2		| bit get to top yet?
X	bne udiv_sr		| yes, start testing for subtraction
X	lsll #1,d3		| shift bitnum left
X	lsll #1,d2		| shift divisor left
X	bra udiv_sl		|
Xudiv_sr:			|
X	cmpl d2,d1		| divisor > dividend?
X	beq udiv_sub		|
X	bcs udiv_s		|
Xudiv_sub:			|
X	subl d2,d1		| take divisor (shifted) out of dividend
X	orl d3,d0		| set this bit in d0
X	tstl d1			| anything left?
X	beq udiv_done		| nope
Xudiv_s:				|
X	subql #1,d4		| dec bitnum (new only)
X	lsrl #1,d2		| shift right one
X	lsrl #1,d3		| and the bitnum too
X	bne udiv_sr		| more bits, keep dividing
X	bra udiv_done		|
Xudiv_zero:			|
X	clrl d0			| just return 0
Xudiv_done:			|
X	movel sp at +,d4		|
X	movel sp at +,d3		| get bags back
X	movel sp at +,d2		|
X				|
X	unlk a6			|
X	rts			|
SHAR_EOF
true || echo 'restore of udivsi3.s failed'
fi
# ============= umodsi3.s ==============
if test -f 'umodsi3.s' -a X"$1" != X"-c"; then
	echo 'x - skipping umodsi3.s (File already exists)'
else
echo 'x - extracting umodsi3.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'umodsi3.s' &&
X	.text			| cjpurcell 30Jul89
X	.even			|
X	.globl __umodsi3	|
X	.globl ___umodsi3	|
X__umodsi3:			|
X___umodsi3:			|
X	link a6,#-4		|
X	movl d2,sp at -		|
X	movel a6@(12),sp at -	|
X	movel a6@(8),sp at -	|
X	jbsr ___udivsi3		|
X	movel d0,d0		|
X	movel a6@(12),sp at -	|
X	movel d0,sp at -		|
X	jbsr ___mulsi3		|
X	movel d0,d2		|
X	movel a6@(8),d1		|
X	subl d2,d1		|
X	movel d1,a6@(-4)	|
X	movel a6@(-4),d0	|
X	movl a6@(-8),d2		|
X	unlk a6			|
X	rts			|
SHAR_EOF
true || echo 'restore of umodsi3.s failed'
fi
# ============= umulsi3.s ==============
if test -f 'umulsi3.s' -a X"$1" != X"-c"; then
	echo 'x - skipping umulsi3.s (File already exists)'
else
echo 'x - extracting umulsi3.s (Text)'
sed 's/^X//' << 'SHAR_EOF' > 'umulsi3.s' &&
X	.text			| cjpurcell 30Jul89
X	.even			|
X	.globl __umulsi3	|
X	.globl ___umulsi3	|
X__umulsi3:			|
X___umulsi3:			|
X	link a6,#0		| 0 bytes of local
X	movel d2,sp at -		| push d2
X	movel a6@(8),d2		| get a
X	beq umul_zero		| 0? return 0
X	movel a6@(12),d1	| get b
X	beq umul_zero		| 0? return 0
X				|
X	mulu d1,d2		| mul bottom parts
X	movel d2,d0		| save that
X	movew a6@(8),d2		| get hi part of a
X	beq umul_1		| zero, skip it
X	movel a6@(12),d1	| get b
X	mulu d1,d2		| mul a hi by b lo
X	lsll #8,d2		| shift over
X	lsll #8,d2		|  ...twice...
X	addl d2,d0		| add that in
Xumul_1:				|
X	clrl d1			|
X	movew a6@(12),d1	| get b hi
X	beq umul_ret		|
X	movel a6@(8),d2		| and a lo
X	mulu d1,d2		| mult those
X	lsll #8,d2		| shift it over
X	lsll #8,d2		|  ... twice...
X	addl d2,d0		| and add that in
X	bra umul_ret		| go home
Xumul_zero:			|
X	clrl d0			| return 0
Xumul_ret:			|
X	movel sp at +,d2		| get d2 back
X				|
X	unlk a6			| flush frame
X	rts			|
SHAR_EOF
true || echo 'restore of umulsi3.s failed'
fi
exit 0



More information about the Alt.sources mailing list