"""Quasi-Monte Carlo engines and helpers."""
from __future__ import annotations

import copy
import math
import numbers
import os
import warnings
from abc import ABC, abstractmethod
from typing import (
    Callable,
    ClassVar,
    Dict,
    List,
    Optional,
    overload,
    TYPE_CHECKING,
)

import numpy as np

if TYPE_CHECKING:
    import numpy.typing as npt
    from typing_extensions import Literal
    from scipy._lib._util import (
        DecimalNumber, GeneratorType, IntNumber, SeedType,
    )

import scipy.stats as stats
from scipy._lib._util import rng_integers
from scipy.stats._sobol import (
    _initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward,
    _categorize, _initialize_direction_numbers, _MAXDIM, _MAXBIT,
)
from scipy.stats._qmc_cy import (
    _cy_wrapper_centered_discrepancy,
    _cy_wrapper_wrap_around_discrepancy,
    _cy_wrapper_mixture_discrepancy,
    _cy_wrapper_l2_star_discrepancy,
    _cy_wrapper_update_discrepancy,
    _cy_van_der_corput_scrambled,
    _cy_van_der_corput,
)

__all__ = ['scale', 'discrepancy', 'update_discrepancy',
           'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube',
           'MultinomialQMC', 'MultivariateNormalQMC']


@overload
def check_random_state(seed: Optional[IntNumber] = ...) -> np.random.Generator:
    ...


@overload
def check_random_state(seed: GeneratorType) -> GeneratorType:
    ...


def check_random_state(seed=None):
    """Turn `seed` into a `numpy.random.Generator` instance.

    Parameters
    ----------
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional

        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
        Random number generator.

    """
    if seed is None or isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.default_rng(seed)
    elif isinstance(seed, (np.random.RandomState, np.random.Generator)):
        return seed
    else:
        raise ValueError(f'{seed!r} cannot be used to seed a'
                         ' numpy.random.Generator instance')


def scale(
    sample: npt.ArrayLike,
    l_bounds: npt.ArrayLike,
    u_bounds: npt.ArrayLike,
    *,
    reverse: bool = False,
) -> np.ndarray:
    r"""Sample scaling from unit hypercube to different bounds.

    To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
    with :math:`a` the lower bounds and :math:`b` the upper bounds.
    The following transformation is used:

    .. math::

        (b - a) \cdot \text{sample} + a

    Parameters
    ----------
    sample : array_like (n, d)
        Sample to scale.
    l_bounds, u_bounds : array_like (d,)
        Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
        data. If `reverse` is True, range of the original data to transform
        to the unit hypercube.
    reverse : bool, optional
        Reverse the transformation from different bounds to the unit hypercube.
        Default is False.

    Returns
    -------
    sample : array_like (n, d)
        Scaled sample.

    Examples
    --------
    Transform 3 samples in the unit hypercube to bounds:

    >>> from scipy.stats import qmc
    >>> l_bounds = [-2, 0]
    >>> u_bounds = [6, 5]
    >>> sample = [[0.5 , 0.75],
    ...           [0.5 , 0.5],
    ...           [0.75, 0.25]]
    >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
    >>> sample_scaled
    array([[2.  , 3.75],
           [2.  , 2.5 ],
           [4.  , 1.25]])

    And convert back to the unit hypercube:

    >>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
    >>> sample_
    array([[0.5 , 0.75],
           [0.5 , 0.5 ],
           [0.75, 0.25]])
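
    The transformation is exact, so scaling and then reversing recovers the
    original points; a quick check with the arrays defined above:

    >>> import numpy as np
    >>> np.allclose(sample_, sample)
    True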

    """
    sample = np.asarray(sample)
    lower = np.atleast_1d(l_bounds)
    upper = np.atleast_1d(u_bounds)

    # Checking bounds and sample
    if not sample.ndim == 2:
        raise ValueError('Sample is not a 2D array')

    lower, upper = np.broadcast_arrays(lower, upper)

    if not np.all(lower < upper):
        raise ValueError('Bounds are not consistent a < b')

    if len(lower) != sample.shape[1]:
        raise ValueError('Sample dimension is different than bounds dimension')

    if not reverse:
        # Checking that the sample is within the unit hypercube
        if not (np.all(sample >= 0) and np.all(sample <= 1)):
            raise ValueError('Sample is not in unit hypercube')

        return sample * (upper - lower) + lower
    else:
        # Checking that the sample is within the given bounds
        if not (np.all(sample >= lower) and np.all(sample <= upper)):
            raise ValueError('Sample is out of bounds')

        return (sample - lower) / (upper - lower)


def discrepancy(
        sample: npt.ArrayLike,
        *,
        iterative: bool = False,
        method: Literal["CD", "WD", "MD", "L2-star"] = "CD",
        workers: IntNumber = 1) -> float:
    r"""Discrepancy of a given sample.

    Parameters
    ----------
    sample : array_like (n, d)
        The sample to compute the discrepancy from.
    iterative : bool, optional
        Must be False if not using it for updating the discrepancy.
        Default is False. Refer to the notes for more details.
    method : str, optional
        Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``.
        Refer to the notes for more details. Default is ``CD``.
    workers : int, optional
        Number of workers to use for parallel processing. If -1 is given all
        CPU threads are used. Default is 1.

    Returns
    -------
    discrepancy : float
        Discrepancy.

    Notes
    -----
    The discrepancy is a uniformity criterion used to assess the space filling
    of a number of samples in a hypercube. A discrepancy quantifies the
    distance between the continuous uniform distribution on a hypercube and the
    discrete uniform distribution on :math:`n` distinct sample points.

    The lower the value is, the better the coverage of the parameter space is.

    For a collection of subsets of the hypercube, the discrepancy is the
    difference between the fraction of sample points in one of those
    subsets and the volume of that subset. There are different definitions of
    discrepancy corresponding to different collections of subsets. Some
    versions take a root mean square difference over subsets instead of
    a maximum.

    A measure of uniformity is reasonable if it satisfies the following
    criteria [1]_:

    1. It is invariant under permuting factors and/or runs.
    2. It is invariant under rotation of the coordinates.
    3. It can measure not only uniformity of the sample over the hypercube,
       but also the projection uniformity of the sample over non-empty
       subset of lower dimension hypercubes.
    4. There is some reasonable geometric meaning.
    5. It is easy to compute.
    6. It satisfies the Koksma-Hlawka-like inequality.
    7. It is consistent with other criteria in experimental design.

    Four methods are available:

    * ``CD``: Centered Discrepancy - subspace involves a corner of the
      hypercube
    * ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds
    * ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria
    * ``L2-star``: L2-star discrepancy - like CD BUT variant to rotation

    See [2]_ for precise definitions of each method.
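
    For reference, the centered discrepancy (``CD``) follows Hickernell's
    centered :math:`L^2` formula, stated here for convenience (see [2]_ for
    the other criteria):

    .. math::

        CD^2(x) = \left(\frac{13}{12}\right)^d
        - \frac{2}{n} \sum_{i=1}^{n} \prod_{j=1}^{d}
          \left(1 + \frac{1}{2}|x_{ij} - 0.5| - \frac{1}{2}|x_{ij} - 0.5|^2\right)
        + \frac{1}{n^2} \sum_{i=1}^{n} \sum_{k=1}^{n} \prod_{j=1}^{d}
          \left(1 + \frac{1}{2}|x_{ij} - 0.5| + \frac{1}{2}|x_{kj} - 0.5|
          - \frac{1}{2}|x_{ij} - x_{kj}|\right)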

    Lastly, using ``iterative=True``, it is possible to compute the
    discrepancy as if we had :math:`n+1` samples. This is useful if we want
    to add a point to a sampling and check the candidate which would give the
    lowest discrepancy. Then you could just update the discrepancy with
    each candidate using `update_discrepancy`. This method is faster than
    computing the discrepancy for a large number of candidates.

    References
    ----------
    .. [1] Fang et al. "Design and modeling for computer experiments".
       Computer Science and Data Analysis Series, 2006.
    .. [2] Zhou Y.-D. et al. Mixture discrepancy for quasi-random point sets.
       Journal of Complexity, 29 (3-4) , pp. 283-301, 2013.
    .. [3] T. T. Warnock. "Computational investigations of low discrepancy
       point sets". Applications of Number Theory to Numerical
       Analysis, Academic Press, pp. 319-343, 1972.

    Examples
    --------
    Calculate the quality of the sample using the discrepancy:

    >>> from scipy.stats import qmc
    >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
    >>> l_bounds = [0.5, 0.5]
    >>> u_bounds = [6.5, 6.5]
    >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
    >>> space
    array([[0.08333333, 0.41666667],
           [0.25      , 0.91666667],
           [0.41666667, 0.25      ],
           [0.58333333, 0.75      ],
           [0.75      , 0.08333333],
           [0.91666667, 0.58333333]])
    >>> qmc.discrepancy(space)
    0.008142039609053464
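
    The other criteria are requested through `method`; their raw values are
    only meaningful when comparing designs assessed with the same method:

    >>> disc_md = qmc.discrepancy(space, method='MD')
    >>> disc_wd = qmc.discrepancy(space, method='WD')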

    We can also compute iteratively the ``CD`` discrepancy by using
    ``iterative=True``.

    >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
    >>> disc_init
    0.04769081147119336
    >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
    0.008142039609053513
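
    This is typically used to rank candidate points before appending one of
    them to the design; a minimal sketch (the candidate points below are
    arbitrary, for illustration only):

    >>> candidates = [[0.1, 0.1], [0.05, 0.3], [0.2, 0.2]]
    >>> disc_candidates = [
    ...     qmc.update_discrepancy(x, space[:-1], disc_init) for x in candidates
    ... ]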

    CdtypeZorderrD   rE   r   rF   rG   )rO   ZWDZMDzL2-starrR   z* is not a valid method. It must be one of )r8   rH   float64rI   r<   rJ   _validate_workersr   r   r   r   set)r>   rP   rQ   rR   methodsr1   r1   r3   r#      s    p
r#   x_newinitial_discr   c                 C  s   t j|t jdd}t j| t jdd} |jdkstdt |dkr)t |dks-td| jdks6tdt | dkrDt | dksHtd	| jd |jd krVtd
t| ||S )a  Update the centered discrepancy with a new sample.

    Parameters
    ----------
    x_new : array_like (1, d)
        The new sample to add in `sample`.
    sample : array_like (n, d)
        The initial sample.
    initial_disc : float
        Centered discrepancy of the `sample`.

    Returns
    -------
    discrepancy : float
        Centered discrepancy of the sample composed of `x_new` and `sample`.

    Examples
    --------
    We can also compute iteratively the discrepancy by using
    ``iterative=True``.

    >>> from scipy.stats import qmc
    >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
    >>> l_bounds = [0.5, 0.5]
    >>> u_bounds = [6.5, 6.5]
    >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
    >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
    >>> disc_init
    0.04769081147119336
    >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
    0.008142039609053513
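
    The updated value agrees (up to floating point error) with a direct
    computation on the full sample, which is a handy consistency check:

    >>> disc = qmc.update_discrepancy(space[-1], space[:-1], disc_init)
    >>> np.isclose(disc, qmc.discrepancy(space))
    True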

    """
    sample = np.asarray(sample, dtype=np.float64, order='C')
    x_new = np.asarray(x_new, dtype=np.float64, order='C')

    # Checking that the sample is within the hypercube and 2D
    if not sample.ndim == 2:
        raise ValueError('Sample is not a 2D array')

    if not (np.all(sample >= 0) and np.all(sample <= 1)):
        raise ValueError('Sample is not in unit hypercube')

    # Checking that x_new is within the hypercube and 1D
    if not x_new.ndim == 1:
        raise ValueError('x_new is not a 1D array')

    if not (np.all(x_new >= 0) and np.all(x_new <= 1)):
        raise ValueError('x_new is not in unit hypercube')

    if x_new.shape[0] != sample.shape[1]:
        raise ValueError('x_new and sample must be broadcastable')

    return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc)


def _perturb_discrepancy(sample: np.ndarray, i1: int, i2: int, k: int,
                         disc: float) -> float:
    """Centered discrepancy after an elementary perturbation of a LHS.

    An elementary perturbation consists of an exchange of coordinates between
    two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
    this operation conserves the LHS properties.

    Parameters
    ----------
    sample : array_like (n, d)
        The sample (before permutation) to compute the discrepancy from.
    i1 : int
        The first line of the elementary permutation.
    i2 : int
        The second line of the elementary permutation.
    k : int
        The column of the elementary permutation.
    disc : float
        Centered discrepancy of the design before permutation.

    Returns
    -------
    discrepancy : float
        Centered discrepancy of the design after permutation.

    References
    ----------
    .. [1] Jin et al. "An efficient algorithm for constructing optimal design
       of computer experiments", Journal of Statistical Planning and
       Inference, 2005.

    r         ?g      ?g       @NrF   ZaxisrD   rW   F)rL   r8   ZprodabsonesrB   sum)r>   r_   ra   rb   rc   nZz_ijZc_i1jZc_i2jZc_i1i1Zc_i2i2ZnumZdenumZgammaZc_p_i1jZc_p_i2jZalphaZbetaZg_i1Zg_i2Zh_i1Zh_i2Zc_p_i1i1Zc_p_i2i2Zsum_maskZdisc_epr1   r1   r3   _perturb_discrepancyx  sj   
!

($($&&((::$$rl   rj   c                 C  s   t j| d | d dk td}tdt| d d d D ],}d| d dB }d||| d dd| < d|||d|d@   d	  d dd| < qt jdddt |d
 dd  d dB f S )a  Prime numbers from 2 to *n*.

    Parameters
    ----------
    n : int
        Sup bound with ``n >= 6``.

    Returns
    -------
    primes : list(int)
        Primes in ``2 <= p < n``.

    Notes
    -----
    Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021
    by the original author, Bruno Astrolino, for free use in SciPy under
    the 3-clause BSD.

    References
    ----------
    .. [1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
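
    Examples
    --------
    A short sanity check (the helper is module-internal):

    >>> from scipy.stats._qmc import primes_from_2_to
    >>> primes_from_2_to(12).tolist()
    [2, 3, 5, 7, 11]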

    """
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
    for i in range(1, int(n ** 0.5) // 3 + 1):
        k = 3 * i + 1 | 1
        sieve[k * k // 3::2 * k] = False
        sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]


def n_primes(n: IntNumber) -> List[int]:
    """List of the n-first prime numbers.

    Parameters
    ----------
    n : int
        Number of prime numbers wanted.

    Returns
    -------
    primes : list(int)
        List of primes.

    """
    # A short precomputed table covers small requests; for larger ``n`` the
    # sieve fallback below yields exactly the same first ``n`` primes.
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
              61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
              131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
              197, 199][:n]

    if len(primes) < n:
        big_number = 2000
        while 'Not enough primes':
            primes = primes_from_2_to(big_number)[:n]
            if len(primes) == n:
                break
            big_number += 1000

    return primes


def van_der_corput(
        n: IntNumber,
        base: IntNumber = 2,
        *,
        start_index: IntNumber = 0,
        scramble: bool = False,
        seed: SeedType = None,
        workers: IntNumber = 1) -> np.ndarray:
    """Van der Corput sequence.

    Pseudo-random number generator based on a b-adic expansion.

    Scrambling uses permutations of the remainders (see [1]_). Multiple
    permutations are applied to construct a point. The sequence of
    permutations has to be the same for all points of the sequence.

    Parameters
    ----------
    n : int
        Number of element of the sequence.
    base : int, optional
        Base of the sequence. Default is 2.
    start_index : int, optional
        Index to start the sequence from. Default is 0.
    scramble : bool, optional
        If True, use Owen scrambling. Otherwise no scrambling is done.
        Default is True.
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.
    workers : int, optional
        Number of workers to use for parallel processing. If -1 is
        given all CPU threads are used. Default is 1.

    Returns
    -------
    sequence : list (n,)
        Sequence of Van der Corput.

    References
    ----------
    .. [1] A. B. Owen. "A randomized Halton algorithm in R",
       arXiv:1706.02808, 2017.

    rD   z'base' must be at least 26   rF   Nr   re   )r<   r4   mathceilZlog2r8   repeatarangeshuffler    r!   )
rj   r   r   r   r+   rR   rngcountpermutationsZpermr1   r1   r3   van_der_corput  s   0r   c                   @  sH   e Zd ZdZedddd
dZeddddZdddZdddZdS )r%   a  A generic Quasi-Monte Carlo sampler class meant for subclassing.

    QMCEngine is a base class to construct a specific Quasi-Monte Carlo
    sampler. It cannot be used directly as a sampler.

    Parameters
    ----------
    d : int
        Dimension of the parameter space.
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Notes
    -----
    By convention samples are distributed over the half-open interval
    ``[0, 1)``. Instances of the class can access the attributes: ``d`` for
    the dimension; and ``rng`` for the random number generator (used for the
    ``seed``).

    **Subclassing**

    When subclassing `QMCEngine` to create a new sampler,  ``__init__`` and
    ``random`` must be redefined.

    * ``__init__(d, seed=None)``: at least fix the dimension. If the sampler
      does not take advantage of a ``seed`` (deterministic methods like
      Halton), this parameter can be omitted.
    * ``random(n)``: draw ``n`` from the engine and increase the counter
      ``num_generated`` by ``n``.

    Optionally, two other methods can be overwritten by subclasses:

    * ``reset``: Reset the engine to it's original state.
    * ``fast_forward``: If the sequence is deterministic (like Halton
      sequence), then ``fast_forward(n)`` is skipping the ``n`` first draw.

    Examples
    --------
    To create a random sampler based on ``np.random.random``, we would do the
    following:

    >>> from scipy.stats import qmc
    >>> class RandomEngine(qmc.QMCEngine):
    ...     def __init__(self, d, seed=None):
    ...         super().__init__(d=d, seed=seed)
    ...
    ...
    ...     def random(self, n=1):
    ...         self.num_generated += n
    ...         return self.rng.random((n, self.d))
    ...
    ...
    ...     def reset(self):
    ...         super().__init__(d=self.d, seed=self.rng_seed)
    ...         return self
    ...
    ...
    ...     def fast_forward(self, n):
    ...         self.random(n)
    ...         return self

    After subclassing `QMCEngine` to define the sampling strategy we want to
    use, we can create an instance to sample from.

    >>> engine = RandomEngine(2)
    >>> engine.random(5)
    array([[0.22733602, 0.31675834],  # random
           [0.79736546, 0.67625467],
           [0.39110955, 0.33281393],
           [0.59830875, 0.18673419],
           [0.67275604, 0.94180287]])

    We can also reset the state of the generator and resample again.

    >>> _ = engine.reset()
    >>> engine.random(5)
    array([[0.22733602, 0.31675834],  # random
           [0.79736546, 0.67625467],
           [0.39110955, 0.33281393],
           [0.59830875, 0.18673419],
           [0.67275604, 0.94180287]])

    """

    @abstractmethod
    def __init__(self, d: IntNumber, *, seed: SeedType = None) -> None:
        if not np.issubdtype(type(d), np.integer):
            raise ValueError('d must be an integer value')

        self.d = d
        self.rng = check_random_state(seed)
        self.rng_seed = copy.deepcopy(seed)
        self.num_generated = 0

    @abstractmethod
    def random(self, n: IntNumber = 1) -> np.ndarray:
        """Draw `n` in the half-open interval ``[0, 1)``.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space.
            Default is 1.

        Returns
        -------
        sample : array_like (n, d)
            QMC sample.

        Nr1   r   rj   r1   r1   r3   r:     s    zQMCEngine.randomc                 C  s    t | j}t|| _d| _| S )zReset the engine to base state.

        Returns
        -------
        engine : QMCEngine
            Engine reset to its base state.

        r   )r   r   r   r4   r   r   )r   r+   r1   r1   r3   reset  s   	
zQMCEngine.resetc                 C  s   | j |d | S )a
  Fast-forward the sequence by `n` positions.

        Parameters
        ----------
        n : int
            Number of points to skip in the sequence.

        Returns
        -------
        engine : QMCEngine
            Engine reset to its base state.

        )rj   )r:   r   r1   r1   r3   fast_forward  s   zQMCEngine.fast_forward)r   r   r+   r   r-   r   rF   rj   r   r-   rC   )r-   r%   )rj   r   r-   r%   )	__name__
__module____qualname____doc__r   r   r:   r   r   r1   r1   r1   r3   r%   `  s    X
r%   c                      s>   e Zd ZdZdddd fddZ	ddddddZ  ZS )r'   a
  Halton sequence.

    Pseudo-random number generator that generalize the Van der Corput sequence
    for multiple dimensions. The Halton sequence uses the base-two Van der
    Corput sequence for the first dimension, base-three for its second and
    base-:math:`n` for its n-dimension.

    Parameters
    ----------
    d : int
        Dimension of the parameter space.
    scramble : bool, optional
        If True, use Owen scrambling. Otherwise no scrambling is done.
        Default is True.
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Notes
    -----
    The Halton sequence has severe striping artifacts for even modestly
    large dimensions. These can be ameliorated by scrambling. Scrambling
    also supports replication-based error estimates and extends
    applicabiltiy to unbounded integrands.

    References
    ----------
    .. [1] Halton, "On the efficiency of certain quasi-random sequences of
       points in evaluating multi-dimensional integrals", Numerische
       Mathematik, 1960.
    .. [2] A. B. Owen. "A randomized Halton algorithm in R",
       arXiv:1706.02808, 2017.

    Examples
    --------
    Generate samples from a low discrepancy sequence of Halton.

    >>> from scipy.stats import qmc
    >>> sampler = qmc.Halton(d=2, scramble=False)
    >>> sample = sampler.random(n=5)
    >>> sample
    array([[0.        , 0.        ],
           [0.5       , 0.33333333],
           [0.25      , 0.66666667],
           [0.75      , 0.11111111],
           [0.125     , 0.44444444]])

    Compute the quality of the sample using the discrepancy criterion.

    >>> qmc.discrepancy(sample)
    0.088893711419753

    If some wants to continue an existing design, extra points can be obtained
    by calling again `random`. Alternatively, you can skip some points like:

    >>> _ = sampler.fast_forward(5)
    >>> sample_continued = sampler.random(n=5)
    >>> sample_continued
    array([[0.3125    , 0.37037037],
           [0.8125    , 0.7037037 ],
           [0.1875    , 0.14814815],
           [0.6875    , 0.48148148],
           [0.4375    , 0.81481481]])

    Finally, samples can be scaled to bounds.

    >>> l_bounds = [0, 2]
    >>> u_bounds = [10, 5]
    >>> qmc.scale(sample_continued, l_bounds, u_bounds)
    array([[3.125     , 3.11111111],
           [8.125     , 4.11111111],
           [1.875     , 2.44444444],
           [6.875     , 3.44444444],
           [4.375     , 4.44444444]])

    TNr   r+   r   r   r   rB   r+   r   r-   r   c                  s*   t  j||d || _t|| _|| _d S )Nr   r+   )superr   r+   r   r   r   r   r   r   r+   	__class__r1   r3   r   K  s   

zHalton.__init__rF   rX   rj   rR   rC   c                  sD   t  fddjD } j 7  _t|j jS )a  Draw `n` in the half-open interval ``[0, 1)``.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space. Default is 1.
        workers : int, optional
            Number of workers to use for parallel processing. If -1 is
            given all CPU threads are used. Default is 1. It becomes faster
            than one worker for `n` greater than :math:`10^3`.

        Returns
        -------
        sample : array_like (n, d)
            QMC sample.

        c              
     s0   g | ]}t  t|jjtjd qS )r   )r   r`   r   r   r   r   r+   ).0Zbdimrj   r   rR   r1   r3   
<listcomp>k  s    

z!Halton.random.<locals>.<listcomp>)rZ   r   r   r8   arrayTreshaper   )r   rj   rR   r>   r1   r   r3   r:   T  s   zHalton.randomr   r   r   rB   r+   r   r-   r   r   )rj   r   rR   r   r-   rC   )r   r   r   r   r   r:   __classcell__r1   r1   r   r3   r'     s    Q
r'   c                      s\   e Zd ZdZdddddd  fddZd!d"ddZd!d"ddZd#d"ddZd$ddZ  Z	S )%r(   a  Latin hypercube sampling (LHS).

    A Latin hypercube sample [1]_ generates :math:`n` points in
    :math:`[0,1)^{d}`. Each univariate marginal distribution is stratified,
    placing exactly one point in :math:`[j/n, (j+1)/n)` for
    :math:`j=0,1,...,n-1`. They are still applicable when :math:`n << d`.

    Parameters
    ----------
    d : int
        Dimension of the parameter space.
    centered : bool, optional
        Center the point within the multi-dimensional grid. Default is False.
    optimization : {None, "random-cd"}, optional
        Whether to use an optimization scheme to construct a LHS.
        Default is None.

        * ``random-cd``: random permutations of coordinates to lower the
          centered discrepancy [5]_. The best design based on the centered
          discrepancy is constantly updated. Centered discrepancy-based
          design shows better space filling robustness toward 2D and 3D
          subprojections compared to using other discrepancy measures [6]_.

        .. versionadded:: 1.8.0

    strength : {1, 2}, optional
        Strength of the LHS. ``strength=1`` produces a plain LHS while
        ``strength=2`` produces an orthogonal array based LHS of strength 2
        [7]_, [8]_. In that case, only ``n=p**2`` points can be sampled,
        with ``p`` a prime number. It also constrains ``d <= p + 1``.
        Default is 1.

        .. versionadded:: 1.8.0

    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Notes
    -----

    When LHS is used for integrating a function :math:`f` over :math:`n`,
    LHS is extremely effective on integrands that are nearly additive [2]_.
    With a LHS of :math:`n` points, the variance of the integral is always
    lower than plain MC on :math:`n-1` points [3]_. There is a central limit
    theorem for LHS on the mean and variance of the integral [4]_, but not
    necessarily for optimized LHS due to the randomization.

    :math:`A` is called an orthogonal array of strength :math:`t` if in each
    n-row-by-t-column submatrix of :math:`A`: all :math:`p^t` possible
    distinct rows occur the same number of times. The elements of :math:`A`
    are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols.
    The constraint that :math:`p` must be a prime number is to allow modular
    arithmetic.

    Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and
    strength 2 is a useful increment over strength 1. Going to strength 3 is
    a smaller increment and scrambled QMC like Sobol', Halton are more
    performant [7]_.

    To create a LHS of strength 2, the orthogonal array :math:`A` is
    randomized by applying a random, bijective map of the set of symbols onto
    itself. For example, in column 0, all 0s might become 2; in column 1,
    all 0s might become 1, etc.
    Then, for each column :math:`i` and symbol :math:`j`, we add a plain,
    one-dimensional LHS of size :math:`p` to the subarray where
    :math:`A^i = j`. The resulting matrix is finally divided by :math:`p`.

    References
    ----------
    .. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values
       of Input Variables in the Analysis of Output from a Computer Code."
       Technometrics, 1979.
    .. [2] M. Stein, "Large sample properties of simulations using Latin
       hypercube sampling." Technometrics 29, no. 2: 143-151, 1987.
    .. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature."
       SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997
    .. [4]  Loh, W.-L. "On Latin hypercube sampling." The annals of statistics
       24, no. 5: 2058-2080, 1996.
    .. [5] Fang et al. "Design and modeling for computer experiments".
       Computer Science and Data Analysis Series, 2006.
    .. [6] Damblin et al., "Numerical studies of space filling designs:
       optimization of Latin Hypercube Samples and subprojection properties."
       Journal of Simulation, 2013.
    .. [7] A. B. Owen , "Orthogonal arrays for computer experiments,
       integration and visualization." Statistica Sinica, 1992.
    .. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes."
       Journal of the American Statistical Association, 1993.

    Examples
    --------
    Generate samples from a Latin hypercube generator.

    >>> from scipy.stats import qmc
    >>> sampler = qmc.LatinHypercube(d=2)
    >>> sample = sampler.random(n=5)
    >>> sample
    array([[0.1545328 , 0.53664833],  # random
           [0.84052691, 0.06474907],
           [0.52177809, 0.93343721],
           [0.68033825, 0.36265316],
           [0.26544879, 0.61163943]])

    Compute the quality of the sample using the discrepancy criterion.

    >>> qmc.discrepancy(sample)
    0.0196...  # random

    Samples can be scaled to bounds.

    >>> l_bounds = [0, 2]
    >>> u_bounds = [10, 5]
    >>> qmc.scale(sample, l_bounds, u_bounds)
    array([[1.54532796, 3.609945  ],  # random
           [8.40526909, 2.1942472 ],
           [5.2177809 , 4.80031164],
           [6.80338249, 3.08795949],
           [2.65448791, 3.83491828]])

    Use the `optimization` keyword argument to produce a LHS with
    lower discrepancy at higher computational cost.

    >>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd")
    >>> sample = sampler.random(n=5)
    >>> qmc.discrepancy(sample)
    0.0176...  # random

    Use the `strength` keyword argument to produce an orthogonal array based
    LHS of strength 2. In this case, the number of sample points must be the
    square of a prime number.

    >>> sampler = qmc.LatinHypercube(d=2, strength=2)
    >>> sample = sampler.random(n=9)
    >>> qmc.discrepancy(sample)
    0.00526...  # random

    Options could be combined to produce an optimized centered
    orthogonal array based LHS. After optimization, the result would not
    be guaranteed to be of strength 2.

    FrF   N)centeredstrengthoptimizationr+   r   r   r   rB   r   r`   r   Optional[Literal['random-cd']]r+   r   r-   r   c          
   
     s   t  j||d || _| j| jd}z|| | _W n ty3 } z|dt|}t||d }~ww d| j	i}	|  |d urmz|
 }|	| | _W n tyd } z|dt|	}t||d }~ww d| _d| _d S d | _d S )Nr   )rF   rD   z, is not a valid strength. It must be one of z	random-cdz7 is not a valid optimization method. It must be one of d   i'  )r   r   r   _random_random_oa_lhs
lhs_methodKeyErrorr[   r<   
_random_cdrM   optimization_method_n_nochange_n_iters)
r   r   r   r   r   r+   Zlhs_method_strengthexcmessager   r   r1   r3   r     s>   



zLatinHypercube.__init__rj   rC   c                 C  s0   |  |}| jdur| |}|  j|7  _|S )a%  Draw `n` in the half-open interval ``[0, 1)``.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space. Default is 1.

        Returns
        -------
        sample : array_like (n, d)
            LHS sample.

        N)r   r   r   )r   rj   lhsr1   r1   r3   r:   0  s
   


zLatinHypercube.randomc                 C  s|   | j rd}n
| jj|| jfd}ttd|d | jdf}t| jD ]}| j||ddf  q$|j	}|| | }|S )zBase LHS algorithm.rd   )sizerF   N)
r   r   Zuniformr   r8   tiler   rp   r   r   )r   rj   samplespermsrq   r1   r1   r3   r   E  s   zLatinHypercube._randomro   c                 C  s  t |t}|d }|d }t|d }||vs||kr+td|dd d  | j|d kr6tdt j||ftd}t t 	|d}t j
t j| d	d
d	d|ddddf< td|D ]"}t |dddf ||dddf   ||ddd| d f< qct j||ftd}	t|D ]}
| j|}||dd|
f  |	dd|
f< qt j||fd}td| jd| jd}t|D ]2}
t|D ]+}|dd|
f |k}|| }||dd|
f |  |dd|
f |< | }qq|| }|ddd| jf S )z)Orthogonal array based LHS of strength 2.rD   rF   z8n is not the square of a prime number. Close values are Nz*n is too small for d. Must be n > (d-1)**2)rL   rW   )rD   rF   re   r   )rL   )r   r   r   r+   )r8   sqrtZastyper`   rr   r<   r   zerosr   r   stackZmeshgridr   rp   modemptyr   Zpermutationr(   r   r:   Zflattenr   )r   rj   pZn_rowZn_colr   Z	oa_sampleZarraysZp_Z
oa_sample_jr   Zoa_lhs_sampleZ
lhs_enginerb   idxr   r1   r1   r3   r   U  sT   "
(
zLatinHypercube._random_oa_lhsbest_samplec                 C  s4  t |}| jdks|dkrt|| jfS t|}|dkr|S d| jd gd|d gd|d gf}d}d}|| jk r|| jk r|d7 }t| jg|d R  }t| jg|d R  }t| jg|d R  }	t	|||	||}
|
|k r||	|f |||f |||f< ||	|f< |
}d}n|d7 }|| jk r|| jk s?|S )a  Optimal LHS on CD.

        Create a base LHS and do random permutations of coordinates to
        lower the centered discrepancy.
        Because it starts with a normal LHS, it also works with the
        `centered` keyword argument.

        Two stopping criterion are used to stop the algorithm: at most,
        `_n_iters` iterations are performed; or if there is no improvement
        for `_n_nochange` consecutive iterations.
        r   rF   rD   )
rK   r   r8   r   r#   r   r   r   r   rl   )r   r   rj   Z	best_discZboundsZ
n_nochangeZn_iterscolZrow_1Zrow_2rc   r1   r1   r3   r     s:   

zLatinHypercube._random_cd)r   r   r   rB   r   r`   r   r   r+   r   r-   r   r   r   )ro   )r   rC   r-   rC   )
r   r   r   r   r   r:   r   r   r   r   r1   r1   r   r3   r(   u  s     )/r(   c                      s|   e Zd ZU dZeZded< eZded< dddd  fddZ	d!ddZ
d"d#ddZd$ddZd% fddZd&ddZ  ZS )'r&   a  Engine for generating (scrambled) Sobol' sequences.

    Sobol' sequences are low-discrepancy, quasi-random numbers. Points
    can be drawn using two methods:

    * `random_base2`: safely draw :math:`n=2^m` points. This method
      guarantees the balance properties of the sequence.
    * `random`: draw an arbitrary number of points from the
      sequence. See warning below.

    Parameters
    ----------
    d : int
        Dimensionality of the sequence. Max dimensionality is 21201.
    scramble : bool, optional
        If True, use Owen scrambling. Otherwise no scrambling is done.
        Default is True.
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Notes
    -----
    Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
    :math:`[0,1)^{d}`. Scrambling them [2]_ makes them suitable for singular
    integrands, provides a means of error estimation, and can improve their
    rate of convergence.

    There are many versions of Sobol' sequences depending on their
    'direction numbers'. This code uses direction numbers from [3]_. Hence,
    the maximum number of dimension is 21201. The direction numbers have been
    precomputed with search criterion 6 and can be retrieved at
    https://web.maths.unsw.edu.au/~fkuo/sobol/.

    .. warning::

       Sobol' sequences are a quadrature rule and they lose their balance
       properties if one uses a sample size that is not a power of 2, or skips
       the first point, or thins the sequence [4]_.

       If :math:`n=2^m` points are not enough then one should take :math:`2^M`
       points for :math:`M>m`. When scrambling, the number R of independent
       replicates does not have to be a power of 2.

       Sobol' sequences are generated to some number :math:`B` of bits.
       After :math:`2^B` points have been generated, the sequence will repeat.
       Currently :math:`B=30`.

    References
    ----------
    .. [1] I. M. Sobol. The distribution of points in a cube and the accurate
       evaluation of integrals. Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
       1967.

    .. [2] Art B. Owen. Scrambling Sobol and Niederreiter-Xing points.
       Journal of Complexity, 14(4):466-489, December 1998.

    .. [3] S. Joe and F. Y. Kuo. Constructing sobol sequences with better
       two-dimensional projections. SIAM Journal on Scientific Computing,
       30(5):2635-2654, 2008.

    .. [4] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
       2020.

    Examples
    --------
    Generate samples from a low discrepancy sequence of Sobol'.

    >>> from scipy.stats import qmc
    >>> sampler = qmc.Sobol(d=2, scramble=False)
    >>> sample = sampler.random_base2(m=3)
    >>> sample
    array([[0.   , 0.   ],
           [0.5  , 0.5  ],
           [0.75 , 0.25 ],
           [0.25 , 0.75 ],
           [0.375, 0.375],
           [0.875, 0.875],
           [0.625, 0.125],
           [0.125, 0.625]])

    Compute the quality of the sample using the discrepancy criterion.

    >>> qmc.discrepancy(sample)
    0.013882107204860938

    To continue an existing design, extra points can be obtained
    by calling again `random_base2`. Alternatively, you can skip some
    points like:

    >>> _ = sampler.reset()
    >>> _ = sampler.fast_forward(4)
    >>> sample_continued = sampler.random_base2(m=2)
    >>> sample_continued
    array([[0.375, 0.375],
           [0.875, 0.875],
           [0.625, 0.125],
           [0.125, 0.625]])

    Finally, samples can be scaled to bounds.

    >>> l_bounds = [0, 2]
    >>> u_bounds = [10, 5]
    >>> qmc.scale(sample_continued, l_bounds, u_bounds)
    array([[3.75 , 3.125],
           [8.75 , 4.625],
           [6.25 , 2.375],
           [1.25 , 3.875]])

    zClassVar[int]MAXDIMMAXBITTNr   r   r   r   rB   r+   r   r-   r   c                  s   t  j||d || jkrtd| jt  tj|| jft	d| _
t| j
| |s4tj|t	d| _n|   | j | _| jd| j  dd| _d S )Nr   z'Maximum supported dimensionality is {}.rf   rD   rF   r   )r   r   r   r<   formatr   r8   r   r   r`   _svr   _shift	_scrambler   _quasir   _first_pointr   r   r1   r3   r   +  s   

zSobol.__init__c                 C  s~   t t| jd| j| jftddt j| jtd | _| j	 | _
t t| jd| j| j| jftd}t| j|| j d| _dS )zScramble the sequence.rD   )r   rW   rf   r   N)r8   dotr   r   r   r   r`   r   r   r   r   Ztrilr   r   r   )r   Zltmr1   r1   r3   r   D  s   
zSobol._scramblerF   rj   rC   c                 C  s   t j|| jftd}| jdkr@||d @ dkstd |dkr$| j}n+t|d | j| j| j	| j
| t | j|gd| }nt|| jd | j| j	| j
| |  j|7  _|S )a$  Draw next point(s) in the Sobol' sequence.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space. Default is 1.

        Returns
        -------
        sample : array_like (n, d)
            Sobol' sample.

        rf   r   rF   zEThe balance properties of Sobol' points require n to be a power of 2.N)r8   r   r   rT   r   warningswarnr   r   r   r   Zconcatenate)r   rj   r>   r1   r1   r3   r:   S  s   

zSobol.randommc                 C  s@   d| }| j | }||d @ dkstd| j ||| |S )a  Draw point(s) from the Sobol' sequence.

        This function draws :math:`n=2^m` points in the parameter space
        ensuring the balance properties of the sequence.

        Parameters
        ----------
        m : int
            Logarithm in base 2 of the number of samples; i.e., n = 2^m.

        Returns
        -------
        sample : array_like (n, d)
            Sobol' sample.

        rD   rF   r   zThe balance properties of Sobol' points require n to be a power of 2. {0} points have been previously generated, then: n={0}+2**{1}={2}. If you still want to do this, the function 'Sobol.random()' can be used.)r   r<   r   r:   )r   r  rj   Ztotal_nr1   r1   r3   random_base2v  s   

zSobol.random_base2c                   s   t    | j | _| S )zReset the engine to base state.

        Returns
        -------
        engine : Sobol
            Engine reset to its base state.

        )r   r   r   r   r   r   r   r1   r3   r     s   
	zSobol.resetc                 C  sV   | j dkrt|d | j | j| j| j nt|| j d | j| j| j |  j |7  _ | S )a  Fast-forward the sequence by `n` positions.

        Parameters
        ----------
        n : int
            Number of points to skip in the sequence.

        Returns
        -------
        engine : Sobol
            The fast-forwarded engine.

        r   rF   )r   r   r   r   r   r   r1   r1   r3   r     s   
zSobol.fast_forwardr   )r-   r   r   r   )r  r   r-   rC   )r-   r&   )rj   r   r-   r&   )r   r   r   r   r   r   __annotations__r   r   r   r   r:   r  r   r   r   r1   r1   r   r3   r&     s   
 r

#r&   c                      sb   e Zd ZdZ	d!dddddd" fddZd#d$ddZd% fddZd&ddZd#d$dd Z  Z	S )'r*   ai  QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`.

    Parameters
    ----------
    mean : array_like (d,)
        The mean vector. Where ``d`` is the dimension.
    cov : array_like (d, d), optional
        The covariance matrix. If omitted, use `cov_root` instead.
        If both `cov` and `cov_root` are omitted, use the identity matrix.
    cov_root : array_like (d, d'), optional
        A root decomposition of the covariance matrix, where ``d'`` may be less
        than ``d`` if the covariance is not full rank. If omitted, use `cov`.
    inv_transform : bool, optional
        If True, use inverse transform instead of Box-Muller. Default is True.
    engine : QMCEngine, optional
        Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import qmc
    >>> engine = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]])
    >>> sample = engine.random(512)
    >>> _ = plt.scatter(sample[:, 0], sample[:, 1])
    >>> plt.show()

    NT)cov_rootinv_transformenginer+   meanr?   covOptional[npt.ArrayLike]r  r  rB   r	  Optional[QMCEngine]r+   r   r-   r   c                  s  t j|ddd}|jd }|d urlt j|ddd}|jd |jd ks'tdt || s3tdz
t j| }W nH t jjyk   t j	|\}}	t 
|dksYtd	t |d
d }|	t |  }Y nw |d urt |}|jd |jd kstdnd }t j||d || _|sdt|d  }
n|}
|d u rt|
d|d| _nt|tr|j|krtd|| _ntd|| _|| _d S )NFrF   r   Zndminr   rD   z/Dimension mismatch between mean and covariance.z#Covariance matrix is not symmetric.g:0yEzCovariance matrix not PSD.g        r   Tr   r   r+   zPDimension of `engine` must be consistent with dimensions of mean and covariance.F`engine` must be an instance of `scipy.stats.qmc.QMCEngine` or `None`.)r8   r   rL   r<   ZallcloseZ	transposeZlinalgZcholeskyZLinAlgErrorZeighrJ   Zclipr   Z
atleast_2dr   r   _inv_transformr   r   r&   r	  r6   r%   r   _mean_corr_matrix)r   r
  r  r  r  r	  r+   r   ZeigvalZeigvecZ
engine_dimr   r1   r3   r     sJ   




zMultivariateNormalQMC.__init__rF   rj   r   rC   c                 C  s"   |  |}|  j|7  _| |S )a%  Draw `n` QMC samples from the multivariate Normal.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space. Default is 1.

        Returns
        -------
        sample : array_like (n, d)
            Sample.

        )_standard_normal_samplesr   
_correlate)r   rj   base_samplesr1   r1   r3   r:     s   

zMultivariateNormalQMC.randomc                      t    | j  | S )zReset the engine to base state.

        Returns
        -------
        engine : MultivariateNormalQMC
            Engine reset to its base state.

        r   r   r	  r  r   r1   r3   r   *     
	
zMultivariateNormalQMC.resetr  c                 C  s$   | j d ur|| j  | j S || j S r0   )r  r  )r   r  r1   r1   r3   r  7  s   

z MultivariateNormalQMC._correlatec           	      C  s   | j |}| jrtjdd|d   S td|jd d}t	dt
|dd|f  }dtj |ddd| f  }t|}t|}t|| || gd|d}|ddd| jf S )	a3  Draw `n` QMC samples from the standard Normal :math:`N(0, I_d)`.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space. Default is 1.

        Returns
        -------
        sample : array_like (n, d)
            Sample.

        rd   gA?r   r   rD   r   NrF   )r	  r:   r  statsZnormZppfr8   r   rL   r   logr   Zpicossinr   r   r   )	r   rj   r   ZevenZRsZthetasr  r  Ztransf_samplesr1   r1   r3   r  >  s    

z.MultivariateNormalQMC._standard_normal_samplesr0   )r
  r?   r  r  r  r  r  rB   r	  r  r+   r   r-   r   r   r   )r-   r*   )r  rC   r-   rC   )
r   r   r   r   r   r:   r   r  r  r   r1   r1   r   r3   r*     s    #<
r*   c                      sD   e Zd ZdZdddd fddZddddZd fddZ  ZS )r)   a:  QMC sampling from a multinomial distribution.

    Parameters
    ----------
    pvals : array_like (k,)
        Vector of probabilities of size ``k``, where ``k`` is the number
        of categories. Elements must be non-negative and sum to 1.
    engine : QMCEngine, optional
        Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
    seed : {None, int, `numpy.random.Generator`}, optional
        If `seed` is None the `numpy.random.Generator` singleton is used.
        If `seed` is an int, a new ``Generator`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` instance then that instance is
        used.

    Examples
    --------
    >>> from scipy.stats import qmc
    >>> engine = qmc.MultinomialQMC(pvals=[0.2, 0.4, 0.4])
    >>> sample = engine.random(10)

    N)r	  r+   pvalsr?   r	  r  r+   r   r-   r   c                  s   t j|ddd| _t |dk rtdt t |ds!td|d u r.tdd|d| _nt	|t
r@|jdkr<td	|| _ntd
t jd|d d S )NFrF   r  r   z'Elements of pvals must be non-negative.z Elements of pvals must sum to 1.Tr  z Dimension of `engine` must be 1.r  r   )r8   r   r  minr<   Ziscloseri   r&   r	  r6   r%   r   r   r   )r   r  r	  r+   r   r1   r3   r   x  s   

zMultinomialQMC.__init__rF   rj   r   rC   c                 C  sd   | j | }tj| jtd}ttj| jtd| tj	| jt
d}t||| |  j|7  _|S )aJ  Draw `n` QMC samples from the multinomial distribution.

        Parameters
        ----------
        n : int, optional
            Number of samples to generate in the parameter space. Default is 1.

        Returns
        -------
        samples : array_like (pvals,)
            Vector of size ``p`` summing to `n`.

        rf   )r	  r:   Zravelr8   Z
empty_liker  rT   r   r   Z
zeros_liker`   r   r   )r   rj   Z
base_drawsZp_cumulativer>   r1   r1   r3   r:     s   zMultinomialQMC.randomc                   r  )zReset the engine to base state.

        Returns
        -------
        engine : MultinomialQMC
            Engine reset to its base state.

        r  r  r   r1   r3   r     r  zMultinomialQMC.reset)r  r?   r	  r  r+   r   r-   r   r   r   )r-   r)   )r   r   r   r   r   r:   r   r   r1   r1   r   r3   r)   _  s    r)   c                 C  sH   t | } | dkrt } | du rtd| S | dkr"td|  d| S )a@  Validate `workers` based on platform and value.

    Parameters
    ----------
    workers : int, optional
        Number of workers to use for parallel processing. If -1 is
        given all CPU threads are used. Default is 1.

    Returns
    -------
    Workers : int
        Number of CPU used by the algorithm

    """
    workers = int(workers)
    if workers == -1:
        workers = os.cpu_count()
        if workers is None:
            raise NotImplementedError(
                "Cannot determine the number of cpus using os.cpu_count(), "
                "cannot use -1 for the number of workers"
            )
    elif workers <= 0:
        raise ValueError(f"Invalid number of workers: {workers}, must be -1 "
                         "or > 0")

    return workers