/*=============================================================================

    This file is part of FLINT.

    FLINT is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    FLINT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with FLINT; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

=============================================================================*/

/******************************************************************************

    Copyright (C) 2010 Fredrik Johansson

******************************************************************************/

*******************************************************************************

    Macros

*******************************************************************************

MACRO MPN_NORM(a, an)

    Normalise \code{(a, an)} so that either \code{an} is zero or
    \code{a[an - 1]} is nonzero.

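    As an illustration, the following sketch (illustrative only, not part of
    the FLINT sources; the helper name is hypothetical) normalises the result
    of an \code{mpn} subtraction, which may leave high zero limbs:

        #include <gmp.h>
        #include "flint.h"
        #include "mpn_extras.h"

        /* Subtract (b, n) from (a, n), assuming a >= b, and return the
           normalised size of the result stored in (r, n).              */
        mp_size_t sub_and_normalise(mp_ptr r, mp_srcptr a, mp_srcptr b,
                                    mp_size_t n)
        {
            mpn_sub_n(r, a, b, n);  /* may produce high zero limbs */
            MPN_NORM(r, n);         /* now n == 0 or r[n - 1] != 0 */
            return n;
        }
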
MACRO MPN_SWAP(a, an, b, bn)

    Swap \code{(a, an)} and \code{(b, bn)}, i.e.\ swap pointers and sizes.

*******************************************************************************

    Utility functions

*******************************************************************************

void flint_mpn_debug(mp_srcptr x, mp_size_t xsize)

    Prints debug information about \code{(x, xsize)} to \code{stdout}.
    In particular, this will print binary representations of all the limbs.

int flint_mpn_zero_p(mp_srcptr x, mp_size_t xsize)

    Returns $1$ if all limbs of \code{(x, xsize)} are zero, otherwise $0$.

*******************************************************************************

    Divisibility

*******************************************************************************

int flint_mpn_divisible_1_p(x, xsize, d) (macro)

    Expression determining whether \code{(x, xsize)} is divisible by the
    \code{mp_limb_t d} which is assumed to be odd-valued and at least~$3$.

    This function is implemented as a macro.

mp_size_t flint_mpn_divexact_1(mp_ptr x, mp_size_t xsize, mp_limb_t d)

    Divides $x$ once by a known single-limb divisor $d$ and returns the
    new size.

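    The following sketch (illustrative only; the helper name is hypothetical)
    combines the two: it divides out one factor of an odd limb $d \geq 3$ if
    and only if the division is exact.

        #include "flint.h"
        #include "mpn_extras.h"

        /* If d divides (x, xn), divide it out once and return the new
           size; otherwise leave x unchanged and return xn.             */
        mp_size_t strip_one_factor(mp_ptr x, mp_size_t xn, mp_limb_t d)
        {
            if (flint_mpn_divisible_1_p(x, xn, d))
                return flint_mpn_divexact_1(x, xn, d);
            return xn;
        }
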
mp_size_t flint_mpn_remove_2exp(mp_ptr x, mp_size_t xsize, mp_bitcnt_t *bits)

    Divides \code{(x, xsize)} by $2^n$ where $n$ is the number of trailing
    zero bits in $x$. The new size of $x$ is returned, and $n$ is stored in
    the \code{bits} argument. $x$ may not be zero.

mp_size_t flint_mpn_remove_power_ascending(mp_ptr x, mp_size_t xsize,
        mp_ptr p, mp_size_t psize, ulong *exp)

    Divides \code{(x, xsize)} by the largest power $n$ of \code{(p, psize)}
    that is an exact divisor of $x$. The new size of $x$ is returned, and
    $n$ is stored in the \code{exp} argument. $x$ may not be zero, and $p$
    must be greater than $2$.

    This function works by testing divisibility by ascending squares
    $p, p^2, p^4, p^8, \dotsc$, making it efficient for removing potentially
    large powers. Because of its high overhead, it should not be used as
    the first stage of trial division.

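    For example, once ordinary trial division has established that some odd
    limb $d > 2$ divides $x$, the full power of $d$ can be removed in one
    call, as in the following sketch (illustrative only; the helper name is
    hypothetical):

        #include "flint.h"
        #include "mpn_extras.h"

        /* Remove the full power of the single-limb divisor d > 2 from the
           nonzero integer (x, xn); the multiplicity is stored in *exp.    */
        mp_size_t remove_full_power(mp_ptr x, mp_size_t xn, mp_limb_t d,
                                    ulong * exp)
        {
            mp_limb_t p[1];
            p[0] = d;

            return flint_mpn_remove_power_ascending(x, xn, p, 1, exp);
        }
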
int flint_mpn_factor_trial(mp_srcptr x, mp_size_t xsize, slong start, slong stop)

    Searches for a factor of \code{(x, xsize)} among the primes in positions
    \code{start, ..., stop-1} of \code{flint_primes}. Returns $i$ if
    \code{flint_primes[i]} is a factor, or $0$ if no factor is found.
    It is assumed that \code{start >= 1}.

*******************************************************************************

    Division

*******************************************************************************

int flint_mpn_divides(mp_ptr q, mp_srcptr array1,
        mp_size_t limbs1, mp_srcptr arrayg, mp_size_t limbsg, mp_ptr temp)

    If \code{(arrayg, limbsg)} divides \code{(array1, limbs1)} then
    \code{(q, limbs1 - limbsg + 1)} is set to the quotient and 1 is
    returned, otherwise 0 is returned. The temporary space \code{temp}
    must have space for \code{limbsg} limbs.

    Assumes \code{limbs1 >= limbsg > 0}.

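    A typical call allocates the quotient and the scratch space as follows
    (illustrative sketch only; the helper name is hypothetical):

        #include "flint.h"
        #include "mpn_extras.h"

        /* Return 1 and set (q, xn - gn + 1) to x/g if g divides x exactly,
           otherwise return 0. Requires xn >= gn > 0.                       */
        int quotient_if_divides(mp_ptr q, mp_srcptr x, mp_size_t xn,
                                mp_srcptr g, mp_size_t gn)
        {
            mp_ptr temp = flint_malloc(gn * sizeof(mp_limb_t));
            int divides = flint_mpn_divides(q, x, xn, g, gn, temp);

            flint_free(temp);
            return divides;
        }
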
mp_limb_t flint_mpn_preinv1(mp_limb_t d, mp_limb_t d2)

    Computes a precomputed inverse from the two leading limbs
    \code{d = b[n - 1]} and \code{d2 = b[n - 2]} of the divisor \code{b, n},
    to be used with the \code{preinv1} functions.
    We require the most significant bit of \code{b, n} to be 1.

mp_limb_t flint_mpn_divrem_preinv1(mp_ptr q, mp_ptr a,
        mp_size_t m, mp_srcptr b, mp_size_t n, mp_limb_t dinv)

    Divide \code{a, m} by \code{b, n}, returning the high limb of the
    quotient (which will either be 0 or 1), storing the remainder in-place
    in \code{a, n} and the rest of the quotient in \code{q, m - n}.
    We require the most significant bit of \code{b, n} to be 1.
    \code{dinv} must be computed from \code{b[n - 1]}, \code{b[n - 2]} by
    \code{flint_mpn_preinv1}. We also require \code{m >= n >= 2}.

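    The two functions are used together as in the following sketch
    (illustrative only; the helper name is hypothetical and the divisor is
    assumed to be already normalised):

        #include "flint.h"
        #include "mpn_extras.h"

        /* Divide (a, m) by the normalised divisor (b, n), m >= n >= 2.
           On return (q, m - n) plus the returned high limb hold the
           quotient and (a, n) holds the remainder.                     */
        mp_limb_t divrem_with_preinv1(mp_ptr q, mp_ptr a, mp_size_t m,
                                      mp_srcptr b, mp_size_t n)
        {
            mp_limb_t dinv = flint_mpn_preinv1(b[n - 1], b[n - 2]);

            return flint_mpn_divrem_preinv1(q, a, m, b, n, dinv);
        }
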
void flint_mpn_mulmod_preinv1(mp_ptr r,
        mp_srcptr a, mp_srcptr b, mp_size_t n,
        mp_srcptr d, mp_limb_t dinv, ulong norm)

    Given a normalised integer $d$ with precomputed inverse \code{dinv}
    provided by \code{flint_mpn_preinv1}, computes $ab \pmod{d}$ and
    stores the result in $r$. Each of $a$, $b$ and $r$ is expected to
    have $n$ limbs of space, with zero padding if necessary.

    The value \code{norm} is provided for convenience. If $a$, $b$ and
    $d$ have been shifted left by \code{norm} bits so that $d$ is
    normalised, then $r$ will be shifted right by \code{norm} bits
    so that it has the same shift as all the inputs.

    We require $a$ and $b$ to be reduced modulo $d$ before calling the
    function.

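    The \code{norm} mechanism can be driven as in the following sketch
    (illustrative only: the helper name is hypothetical,
    \code{count_leading_zeros} is assumed to be available from FLINT's
    \code{longlong.h}, and $n \geq 2$ is assumed so that $d$ has two
    leading limbs):

        #include <gmp.h>
        #include "flint.h"
        #include "longlong.h"
        #include "mpn_extras.h"

        /* Compute r = a*b mod d for an n-limb d that need not be
           normalised. a and b must already be reduced mod d. The inputs
           are shifted in place and restored before returning.           */
        void mulmod_with_norm(mp_ptr r, mp_ptr a, mp_ptr b, mp_ptr d,
                              mp_size_t n)
        {
            ulong norm;
            mp_limb_t dinv;

            count_leading_zeros(norm, d[n - 1]);

            if (norm)  /* shift so that the top bit of d is set */
            {
                mpn_lshift(d, d, n, norm);
                mpn_lshift(a, a, n, norm);
                mpn_lshift(b, b, n, norm);
            }

            dinv = flint_mpn_preinv1(d[n - 1], d[n - 2]);

            /* r comes back carrying the same shift as the inputs */
            flint_mpn_mulmod_preinv1(r, a, b, n, d, dinv, norm);

            if (norm)  /* undo the shifts */
            {
                mpn_rshift(r, r, n, norm);
                mpn_rshift(a, a, n, norm);
                mpn_rshift(b, b, n, norm);
                mpn_rshift(d, d, n, norm);
            }
        }
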
void flint_mpn_preinvn(mp_ptr dinv, mp_srcptr d, mp_size_t n)

    Compute an $n$ limb precomputed inverse \code{dinv} of the $n$ limb
    integer $d$.

    We require that $d$ is normalised, i.e.\ with the most significant
    bit of the most significant limb set.

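    The inverse occupies $n$ limbs and is typically computed once and then
    reused across many reductions, as in this sketch (illustrative only;
    the helper name is hypothetical):

        #include "flint.h"
        #include "mpn_extras.h"

        /* Allocate and compute an n-limb precomputed inverse of the
           normalised n-limb divisor d. Free the result with flint_free. */
        mp_ptr make_preinvn(mp_srcptr d, mp_size_t n)
        {
            mp_ptr dinv = flint_malloc(n * sizeof(mp_limb_t));

            flint_mpn_preinvn(dinv, d, n);
            return dinv;
        }
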
void flint_mpn_mod_preinvn(mp_ptr r, mp_srcptr a, mp_size_t m,
        mp_srcptr d, mp_size_t n, mp_srcptr dinv)

    Given a normalised integer $d$ of $n$ limbs, with precomputed inverse
    \code{dinv} provided by \code{flint_mpn_preinvn} and integer $a$ of $m$
    limbs, computes $a \pmod{d}$ and stores the result in-place in the lower
    $n$ limbs of $a$. The remaining limbs of $a$ are destroyed.

    We require $m \geq n$. No aliasing of $a$ with any of the other operands
    is permitted.

    Note that this function is not always as fast as ordinary division.

mp_limb_t flint_mpn_divrem_preinvn(mp_ptr q, mp_ptr r, mp_srcptr a,
        mp_size_t m, mp_srcptr d, mp_size_t n, mp_srcptr dinv)

    Given a normalised integer $d$ with precomputed inverse \code{dinv}
    provided by \code{flint_mpn_preinvn}, computes the quotient of $a$ by $d$
    and stores the result in $q$ and the remainder in the lower $n$ limbs of
    $a$. The remaining limbs of $a$ are destroyed.

    The value $q$ is expected to have space for $m - n$ limbs and we require
    $m \geq n$. No aliasing is permitted between $q$ and $a$ or between these
    and any of the other operands.

    Note that this function is not always as fast as ordinary division.

void flint_mpn_mulmod_preinvn(mp_ptr r,
        mp_srcptr a, mp_srcptr b, mp_size_t n,
        mp_srcptr d, mp_srcptr dinv, ulong norm)

    Given a normalised integer $d$ with precomputed inverse \code{dinv}
    provided by \code{flint_mpn_preinvn}, computes $ab \pmod{d}$ and
    stores the result in $r$. Each of $a$, $b$ and $r$ is expected to
    have $n$ limbs of space, with zero padding if necessary.

    The value \code{norm} is provided for convenience. If $a$, $b$ and
    $d$ have been shifted left by \code{norm} bits so that $d$ is
    normalised, then $r$ will be shifted right by \code{norm} bits
    so that it has the same shift as all the inputs.

    We require $a$ and $b$ to be reduced modulo $d$ before calling the
    function.

    Note that this function is not always as fast as ordinary division.

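    This is the typical inner step of a fixed-modulus computation: the
    inverse is computed once with \code{flint_mpn_preinvn} and then reused,
    as in the following sketch (illustrative only; the helper name is
    hypothetical, $d$ is assumed to be already normalised, and $a$, $b$ are
    assumed reduced modulo $d$):

        #include "flint.h"
        #include "mpn_extras.h"

        /* Set r = a^2 * b mod d, where d is normalised with n limbs and
           dinv is the n-limb inverse of d from flint_mpn_preinvn. A
           separate buffer avoids aliasing the output with an input.     */
        void sqr_mul_mod(mp_ptr r, mp_srcptr a, mp_srcptr b,
                         mp_srcptr d, mp_srcptr dinv, mp_size_t n)
        {
            mp_ptr t = flint_malloc(n * sizeof(mp_limb_t));

            flint_mpn_mulmod_preinvn(t, a, a, n, d, dinv, 0);  /* t = a^2 mod d */
            flint_mpn_mulmod_preinvn(r, t, b, n, d, dinv, 0);  /* r = t*b mod d */

            flint_free(t);
        }
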
*******************************************************************************

    GCD

*******************************************************************************

mp_size_t flint_mpn_gcd_full(mp_ptr arrayg,
        mp_ptr array1, mp_size_t limbs1, mp_ptr array2, mp_size_t limbs2)

    Sets \code{(arrayg, retvalue)} to the gcd of \code{(array1, limbs1)} and
    \code{(array2, limbs2)}.

    The only assumption is that neither \code{limbs1} nor \code{limbs2} is
    zero.

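    Since the gcd is no larger than the smaller input, \code{arrayg} needs
    at most \code{min(limbs1, limbs2)} limbs of space (this bound is an
    assumption, not stated above). A caller can allocate accordingly, as in
    the following sketch (illustrative only; the helper name is hypothetical):

        #include "flint.h"
        #include "mpn_extras.h"

        /* Compute g = gcd(a, b) for nonzero (a, an) and (b, bn), returning
           the number of limbs of g through *gn. The caller frees g.       */
        mp_ptr gcd_alloc(mp_size_t * gn, mp_ptr a, mp_size_t an,
                         mp_ptr b, mp_size_t bn)
        {
            mp_size_t alloc = (an < bn) ? an : bn;
            mp_ptr g = flint_malloc(alloc * sizeof(mp_limb_t));

            *gn = flint_mpn_gcd_full(g, a, an, b, bn);
            return g;
        }
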
*******************************************************************************

    Special numbers

*******************************************************************************

void flint_mpn_harmonic_odd_balanced(mp_ptr t, mp_size_t * tsize,
        mp_ptr v, mp_size_t * vsize,
        slong a, slong b, slong n, int d)

    Computes \code{(t,tsize)} and \code{(v,vsize)} such that
    $t/v = H_n = 1 + 1/2 + \dotsb + 1/n$.
    The computation is performed using recursive balanced summation
    over the odd terms. The resulting fraction will not generally be
    normalized. At the top level, this function should be called with
    $n > 0$, $a = 1$, $b = n$, and $d = 1$.

    Enough space should be allocated for $t$ and $v$ to fit the entire sum
    $1 + 1/2 + \dotsb + 1/n$ computed without normalization; i.e.\ $t$ and $v$
    should have room to fit $n!$ plus one extra limb.

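    A top-level call therefore looks like the following sketch (illustrative
    only; the helper name is hypothetical and the limb bound uses the crude
    estimate $n! \le n^n$, i.e.\ at most $n \log_2 n$ bits):

        #include <math.h>
        #include "flint.h"
        #include "mpn_extras.h"

        /* Compute H_n = t/v as an unnormalised fraction; the caller frees
           t and v with flint_free.                                        */
        void harmonic_fraction(mp_ptr * t, mp_size_t * tsize,
                               mp_ptr * v, mp_size_t * vsize, slong n)
        {
            /* room for n! (at most n*log2(n) bits) plus spare limbs */
            mp_size_t alloc = (mp_size_t) (n * log2((double) (n + 2))
                                              / FLINT_BITS) + 3;

            *t = flint_malloc(alloc * sizeof(mp_limb_t));
            *v = flint_malloc(alloc * sizeof(mp_limb_t));

            /* top-level call: a = 1, b = n, d = 1 */
            flint_mpn_harmonic_odd_balanced(*t, tsize, *v, vsize, 1, n, n, 1);
        }
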
*******************************************************************************

    Random Number Generation

*******************************************************************************

void flint_mpn_rrandom(mp_limb_t *rp, gmp_randstate_t state, mp_size_t n)

    Generates a random number with \code{n} limbs and stores
    it at \code{rp}. The number it generates will tend to have
    long strings of zeros and ones in the binary representation.

    This is useful for testing functions and algorithms, since this kind of
    random number is more likely to trigger corner-case bugs.

void flint_mpn_urandomb(mp_limb_t *rp, gmp_randstate_t state, mp_bitcnt_t n)

    Generates a uniformly random number of \code{n} bits and stores
    it at \code{rp}.

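    Both generators take a GMP random state, as in the following sketch
    (illustrative only; the helper name is hypothetical and \code{rp} is
    assumed to have \code{n} limbs of space):

        #include <gmp.h>
        #include "flint.h"
        #include "mpn_extras.h"

        /* Fill (rp, n) with a "stripy" random integer of the kind used to
           trigger corner cases in tests.                                  */
        void random_test_operand(mp_ptr rp, mp_size_t n)
        {
            gmp_randstate_t state;

            gmp_randinit_default(state);
            flint_mpn_rrandom(rp, state, n);
            gmp_randclear(state);
        }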