import math

import attr
import trio

from . import _core
from ._core import enable_ki_protection, ParkingLot
from ._util import Final


@attr.s(frozen=True)
class _EventStatistics:
    tasks_waiting = attr.ib()


@attr.s(repr=False, eq=False, hash=False, slots=True)
class Event(metaclass=Final):
    """A waitable boolean value useful for inter-task synchronization,
    inspired by :class:`threading.Event`.

    An event object has an internal boolean flag, representing whether
    the event has happened yet. The flag is initially False, and the
    :meth:`wait` method waits until the flag is True. If the flag is
    already True, then :meth:`wait` returns immediately. (If the event has
    already happened, there's nothing to wait for.) The :meth:`set` method
    sets the flag to True, and wakes up any waiters.

    This behavior is useful because it helps avoid race conditions and
    lost wakeups: it doesn't matter whether :meth:`set` gets called just
    before or after :meth:`wait`. If you want a lower-level wakeup
    primitive that doesn't have this protection, consider :class:`Condition`
    or :class:`trio.lowlevel.ParkingLot`.

    .. note:: Unlike `threading.Event`, `trio.Event` has no
       `~threading.Event.clear` method. In Trio, once an `Event` has happened,
       it cannot un-happen. If you need to represent a series of events,
       consider creating a new `Event` object for each one (they're cheap!),
       or other synchronization methods like :ref:`channels <channels>` or
       `trio.lowlevel.ParkingLot`.

    """

    _tasks = attr.ib(factory=set, init=False)
    _flag = attr.ib(default=False, init=False)

    def is_set(self):
        """Return the current value of the internal flag."""
        return self._flag

    @enable_ki_protection
    def set(self):
        """Set the internal flag value to True, and wake any waiting tasks."""
        if not self._flag:
            self._flag = True
            # Wake every task that is currently blocked in wait().
            for task in self._tasks:
                _core.reschedule(task)
            self._tasks.clear()

    async def wait(self):
        """Block until the internal flag value becomes True.

        If it's already True, then this method returns immediately.

        """
        if self._flag:
            await trio.lowlevel.checkpoint()
        else:
            task = trio.lowlevel.current_task()
            self._tasks.add(task)

            def abort_fn(_):
                # If the waiting task is cancelled, just forget about it.
                self._tasks.remove(task)
                return trio.lowlevel.Abort.SUCCEEDED

            await trio.lowlevel.wait_task_rescheduled(abort_fn)

    def statistics(self):
        """Return an object containing debugging information.

        Currently the following fields are defined:

        * ``tasks_waiting``: The number of tasks blocked on this event's
          :meth:`wait` method.

        """
        return _EventStatistics(tasks_waiting=len(self._tasks))
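

# Usage sketch for Event (illustrative; the helper names are hypothetical):
# one task blocks in wait() until another task calls set(). Calling set()
# before wait() is also fine -- the waiter then returns immediately.
async def _example_event_usage():
    finished = Event()

    async def waiter():
        await finished.wait()  # parks here until finished.set() runs
        print("the event happened")

    async with trio.open_nursery() as nursery:
        nursery.start_soon(waiter)
        await trio.sleep(0.1)  # let the waiter block first
        finished.set()  # wakes the waiter; the nursery then exits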


class AsyncContextManagerMixin:
    @enable_ki_protection
    async def __aenter__(self):
        await self.acquire()

    @enable_ki_protection
    async def __aexit__(self, *args):
        self.release()


@attr.s(frozen=True)
class _CapacityLimiterStatistics:
    borrowed_tokens = attr.ib()
    total_tokens = attr.ib()
    borrowers = attr.ib()
    tasks_waiting = attr.ib()


class CapacityLimiter(AsyncContextManagerMixin, metaclass=Final):
    """An object for controlling access to a resource with limited capacity.

    Sometimes you need to put a limit on how many tasks can do something at
    the same time. For example, you might want to use some threads to run
    multiple blocking I/O operations in parallel... but if you use too many
    threads at once, then your system can become overloaded and it'll actually
    make things slower. One popular solution is to impose a policy like "run
    up to 40 threads at the same time, but no more". But how do you implement
    a policy like this?

    That's what :class:`CapacityLimiter` is for. You can think of a
    :class:`CapacityLimiter` object as a sack that starts out holding some fixed
    number of tokens::

       limit = trio.CapacityLimiter(40)

    Then tasks can come along and borrow a token out of the sack::

       # Borrow a token:
       async with limit:
           # We are holding a token!
           await perform_expensive_operation()
       # Exiting the 'async with' block puts the token back into the sack

    And crucially, if you try to borrow a token but the sack is empty, then
    you have to wait for another task to finish what it's doing and put its
    token back first before you can take it and continue.

    Another way to think of it: a :class:`CapacityLimiter` is like a sofa with a
    fixed number of seats, and if they're all taken then you have to wait for
    someone to get up before you can sit down.

    By default, :func:`trio.to_thread.run_sync` uses a
    :class:`CapacityLimiter` to limit the number of threads running at once;
    see `trio.to_thread.current_default_thread_limiter` for details.

    If you're familiar with semaphores, then you can think of this as a
    restricted semaphore that's specialized for one common use case, with
    additional error checking. For a more traditional semaphore, see
    :class:`Semaphore`.

    .. note::

       Don't confuse this with the `"leaky bucket"
       <https://en.wikipedia.org/wiki/Leaky_bucket>`__ or `"token bucket"
       <https://en.wikipedia.org/wiki/Token_bucket>`__ algorithms used to
       limit bandwidth usage on networks. The basic idea of using tokens to
       track a resource limit is similar, but this is a very simple sack where
       tokens aren't automatically created or destroyed over time; they're
       just borrowed and then put back.

    """

    def __init__(self, total_tokens):
        self._lot = ParkingLot()
        self._borrowers = set()
        # Maps tasks attempting to acquire -> borrower, to handle on-behalf-of
        self._pending_borrowers = {}
        # Assigning to the property runs the validation in its setter.
        self.total_tokens = total_tokens
        assert self._total_tokens == total_tokens

    def __repr__(self):
        return "<trio.CapacityLimiter at {:#x}, {}/{} with {} waiting>".format(
            id(self), len(self._borrowers), self._total_tokens, len(self._lot)
        )

    @property
    def total_tokens(self):
        """The total capacity available.

        You can change :attr:`total_tokens` by assigning to this attribute. If
        you make it larger, then the appropriate number of waiting tasks will
        be woken immediately to take the new tokens. If you decrease
        total_tokens below the number of tasks that are currently using the
        resource, then all current tasks will be allowed to finish as normal,
        but no new tasks will be allowed in until the total number of tasks
        drops below the new total_tokens.

        """
        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, new_total_tokens):
        if not isinstance(new_total_tokens, int) and new_total_tokens != math.inf:
            raise TypeError("total_tokens must be an int or math.inf")
        if new_total_tokens < 1:
            raise ValueError("total_tokens must be >= 1")
        self._total_tokens = new_total_tokens
        self._wake_waiters()

    def _wake_waiters(self):
        available = self._total_tokens - len(self._borrowers)
        for woken in self._lot.unpark(count=available):
            self._borrowers.add(self._pending_borrowers.pop(woken))

    @property
    def borrowed_tokens(self):
        """The amount of capacity that's currently in use."""
        return len(self._borrowers)

    @property
    def available_tokens(self):
        """The amount of capacity that's available to use."""
        return self.total_tokens - self.borrowed_tokens

    @enable_ki_protection
    def acquire_nowait(self):
        """Borrow a token from the sack, without blocking.

        Raises:
          WouldBlock: if no tokens are available.
          RuntimeError: if the current task already holds one of this sack's
              tokens.

        """
        self.acquire_on_behalf_of_nowait(trio.lowlevel.current_task())

    @enable_ki_protection
    def acquire_on_behalf_of_nowait(self, borrower):
        """Borrow a token from the sack on behalf of ``borrower``, without
        blocking.

        Args:
          borrower: A :class:`trio.lowlevel.Task` or arbitrary opaque object
             used to record who is borrowing this token. This is used by
             :func:`trio.to_thread.run_sync` to allow threads to "hold
             tokens", with the intention in the future of using it to `allow
             deadlock detection and other useful things
             <https://github.com/python-trio/trio/issues/182>`__

        Raises:
          WouldBlock: if no tokens are available.
          RuntimeError: if ``borrower`` already holds one of this sack's
              tokens.

        """
        if borrower in self._borrowers:
            raise RuntimeError(
                "this borrower is already holding one of this "
                "CapacityLimiter's tokens"
            )
        if len(self._borrowers) < self._total_tokens and not self._lot:
            self._borrowers.add(borrower)
        else:
            raise trio.WouldBlock

    @enable_ki_protection
    async def acquire(self):
        """Borrow a token from the sack, blocking if necessary.

        Raises:
          RuntimeError: if the current task already holds one of this sack's
              tokens.

        """
        await self.acquire_on_behalf_of(trio.lowlevel.current_task())

    @enable_ki_protection
    async def acquire_on_behalf_of(self, borrower):
        """Borrow a token from the sack on behalf of ``borrower``, blocking if
        necessary.

        Args:
          borrower: A :class:`trio.lowlevel.Task` or arbitrary opaque object
             used to record who is borrowing this token; see
             :meth:`acquire_on_behalf_of_nowait` for details.

        Raises:
          RuntimeError: if ``borrower`` task already holds one of this sack's
             tokens.

        """
        await trio.lowlevel.checkpoint_if_cancelled()
        try:
            self.acquire_on_behalf_of_nowait(borrower)
        except trio.WouldBlock:
            task = trio.lowlevel.current_task()
            self._pending_borrowers[task] = borrower
            try:
                await self._lot.park()
            except trio.Cancelled:
                self._pending_borrowers.pop(task)
                raise
        else:
            await trio.lowlevel.cancel_shielded_checkpoint()

    @enable_ki_protection
    def release(self):
        """Put a token back into the sack.

        Raises:
          RuntimeError: if the current task has not acquired one of this
              sack's tokens.

        """
        self.release_on_behalf_of(trio.lowlevel.current_task())

    @enable_ki_protection
    def release_on_behalf_of(self, borrower):
        """Put a token back into the sack on behalf of ``borrower``.

        Raises:
          RuntimeError: if the given borrower has not acquired one of this
              sack's tokens.

        """
        if borrower not in self._borrowers:
            raise RuntimeError(
                "this borrower isn't holding any of this "
                "CapacityLimiter's tokens"
            )
        self._borrowers.remove(borrower)
        self._wake_waiters()

    def statistics(self):
        """Return an object containing debugging information.

        Currently the following fields are defined:

        * ``borrowed_tokens``: The number of tokens currently borrowed from
          the sack.
        * ``total_tokens``: The total number of tokens in the sack. Usually
          this will be larger than ``borrowed_tokens``, but it's possible for
          it to be smaller if :attr:`total_tokens` was recently decreased.
        * ``borrowers``: A list of all tasks or other entities that currently
          hold a token.
        * ``tasks_waiting``: The number of tasks blocked on this
          :class:`CapacityLimiter`'s :meth:`acquire` or
          :meth:`acquire_on_behalf_of` methods.

        """
        return _CapacityLimiterStatistics(
            borrowed_tokens=len(self._borrowers),
            total_tokens=self._total_tokens,
            borrowers=list(self._borrowers),
            tasks_waiting=len(self._lot),
        )
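

# Usage sketch for CapacityLimiter (illustrative; the worker body is
# hypothetical): at most three of the ten workers below hold a token at any
# moment, so at most three "work" periods overlap.
async def _example_capacity_limiter_usage():
    limit = CapacityLimiter(3)

    async def worker(i):
        async with limit:  # borrow a token, waiting if the sack is empty
            await trio.sleep(0.1)  # stand-in for the real, expensive work
        # leaving the block puts the token back and wakes a waiter, if any

    async with trio.open_nursery() as nursery:
        for i in range(10):
            nursery.start_soon(worker, i)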


class Semaphore(AsyncContextManagerMixin, metaclass=Final):
    """A `semaphore <https://en.wikipedia.org/wiki/Semaphore_(programming)>`__.
    A semaphore holds an integer value, which can be incremented by
    calling :meth:`release` and decremented by calling :meth:`acquire` – but
    the value is never allowed to drop below zero. If the value is zero, then
    :meth:`acquire` will block until someone calls :meth:`release`.

    If you're looking for a :class:`Semaphore` to limit the number of tasks
    that can access some resource simultaneously, then consider using a
    :class:`CapacityLimiter` instead.

    This object's interface is similar to, but different from, that of
    :class:`threading.Semaphore`.

    A :class:`Semaphore` object can be used as an async context manager; it
    blocks on entry but not on exit.

    Args:
      initial_value (int): A non-negative integer giving semaphore's initial
        value.
      max_value (int or None): If given, makes this a "bounded" semaphore that
        raises an error if the value is about to exceed the given
        ``max_value``.


    """

    def __init__(self, initial_value, *, max_value=None):
        if not isinstance(initial_value, int):
            raise TypeError("initial_value must be an int")
        if initial_value < 0:
            raise ValueError("initial value must be >= 0")
        if max_value is not None:
            if not isinstance(max_value, int):
                raise TypeError("max_value must be None or an int")
            if max_value < initial_value:
                raise ValueError("max_value must be >= initial_value")
        self._lot = trio.lowlevel.ParkingLot()
        self._value = initial_value
        self._max_value = max_value

    def __repr__(self):
        if self._max_value is None:
            max_value_str = ""
        else:
            max_value_str = ", max_value={}".format(self._max_value)
        return "<trio.Semaphore({}{}) at {:#x}>".format(
            self._value, max_value_str, id(self)
        )

    @property
    def value(self):
        """The current value of the semaphore."""
        return self._value

    @property
    def max_value(self):
        """The maximum allowed value. May be None to indicate no limit."""
        return self._max_value

    @enable_ki_protection
    def acquire_nowait(self):
        """Attempt to decrement the semaphore value, without blocking.

        Raises:
          WouldBlock: if the value is zero.

        """
        if self._value > 0:
            assert not self._lot
            self._value -= 1
        else:
            raise trio.WouldBlock

    @enable_ki_protection
    async def acquire(self):
        """Decrement the semaphore value, blocking if necessary to avoid
        letting it drop below zero.

        """
        await trio.lowlevel.checkpoint_if_cancelled()
        try:
            self.acquire_nowait()
        except trio.WouldBlock:
            await self._lot.park()
        else:
            await trio.lowlevel.cancel_shielded_checkpoint()

    @enable_ki_protection
    def release(self):
        """Increment the semaphore value, possibly waking a task blocked in
        :meth:`acquire`.

        Raises:
          ValueError: if incrementing the value would cause it to exceed
              :attr:`max_value`.

        """
        if self._lot:
            assert self._value == 0
            self._lot.unpark(count=1)
        else:
            if self._max_value is not None and self._value == self._max_value:
                raise ValueError("semaphore released too many times")
            self._value += 1

    def statistics(self):
        """Return an object containing debugging information.

        Currently the following fields are defined:

        * ``tasks_waiting``: The number of tasks blocked on this semaphore's
          :meth:`acquire` method.

        """
        return self._lot.statistics()
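

# Usage sketch for Semaphore (illustrative): a bounded semaphore counting two
# free "slots"; releasing past max_value raises ValueError.
async def _example_semaphore_usage():
    slots = Semaphore(2, max_value=2)

    async with slots:  # decrements the value, blocking if it is zero
        assert slots.value == 1
    # leaving the block calls release(), so the value is back to 2
    assert slots.value == 2
    try:
        slots.release()  # would push the value above max_value
    except ValueError:
        pass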


@attr.s(frozen=True)
class _LockStatistics:
    locked = attr.ib()
    owner = attr.ib()
    tasks_waiting = attr.ib()


@attr.s(eq=False, hash=False, repr=False)
class _LockImpl(AsyncContextManagerMixin):
    _lot = attr.ib(factory=ParkingLot, init=False)
    _owner = attr.ib(default=None, init=False)

    def __repr__(self):
        if self.locked():
            s1 = "locked"
            s2 = " with {} waiters".format(len(self._lot))
        else:
            s1 = "unlocked"
            s2 = ""
        return "<{} {} object at {:#x}{}>".format(
            s1, self.__class__.__name__, id(self), s2
        )

    def locked(self):
        """Check whether the lock is currently held.

        Returns:
          bool: True if the lock is held, False otherwise.

        """
        return self._owner is not None

    @enable_ki_protection
    def acquire_nowait(self):
        """Attempt to acquire the lock, without blocking.

        Raises:
          WouldBlock: if the lock is held.

        """
        task = trio.lowlevel.current_task()
        if self._owner is task:
            raise RuntimeError("attempt to re-acquire an already held Lock")
        elif self._owner is None and not self._lot:
            self._owner = task
        else:
            raise trio.WouldBlock

    @enable_ki_protection
    async def acquire(self):
        """Acquire the lock, blocking if necessary."""
        await trio.lowlevel.checkpoint_if_cancelled()
        try:
            self.acquire_nowait()
        except trio.WouldBlock:
            # Ownership is handed to us directly when the current owner
            # releases the lock, so nothing more to do after park().
            await self._lot.park()
        else:
            await trio.lowlevel.cancel_shielded_checkpoint()

    @enable_ki_protection
    def release(self):
        """Release the lock.

        Raises:
          RuntimeError: if the calling task does not hold the lock.

        """
        task = trio.lowlevel.current_task()
        if task is not self._owner:
            raise RuntimeError("can't release a Lock you don't own")
        if self._lot:
            # Wake the next waiter and transfer ownership to it directly.
            (self._owner,) = self._lot.unpark(count=1)
        else:
            self._owner = None

    def statistics(self):
        """Return an object containing debugging information.

        Currently the following fields are defined:

        * ``locked``: boolean indicating whether the lock is held.
        * ``owner``: the :class:`trio.lowlevel.Task` currently holding the lock,
          or None if the lock is not held.
        * ``tasks_waiting``: The number of tasks blocked on this lock's
          :meth:`acquire` method.

        """
        return _LockStatistics(
            locked=self.locked(), owner=self._owner, tasks_waiting=len(self._lot)
        )


class Lock(_LockImpl, metaclass=Final):
    """A classic `mutex
    <https://en.wikipedia.org/wiki/Lock_(computer_science)>`__.

    This is a non-reentrant, single-owner lock. Unlike
    :class:`threading.Lock`, only the owner of the lock is allowed to release
    it.

    A :class:`Lock` object can be used as an async context manager; it
    blocks on entry but not on exit.

    """
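

# Usage sketch for Lock (illustrative): two tasks take turns mutating shared
# state; ``async with`` acquires on entry and releases on exit, and only the
# owner may release.
async def _example_lock_usage():
    lock = Lock()
    counter = {"value": 0}

    async def bump():
        async with lock:
            current = counter["value"]
            await trio.sleep(0)  # a checkpoint while still holding the lock
            counter["value"] = current + 1

    async with trio.open_nursery() as nursery:
        nursery.start_soon(bump)
        nursery.start_soon(bump)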


class StrictFIFOLock(_LockImpl, metaclass=Final):
    r"""A variant of :class:`Lock` where tasks are guaranteed to acquire the
    lock in strict first-come-first-served order.

    An example of when this is useful is if you're implementing something like
    :class:`trio.SSLStream` or an HTTP/2 server using `h2
    <https://hyper-h2.readthedocs.io/>`__, where you have multiple concurrent
    tasks that are interacting with a shared state machine, and at
    unpredictable moments the state machine requests that a chunk of data be
    sent over the network. (For example, when using h2 simply reading incoming
    data can occasionally `create outgoing data to send
    <https://http2.github.io/http2-spec/#PING>`__.) The challenge is to make
    sure that these chunks are sent in the correct order, without being
    garbled.

    One option would be to use a regular :class:`Lock`, and wrap it around
    every interaction with the state machine::

        # This approach is sometimes workable but often sub-optimal; see below
        async with lock:
            state_machine.do_something()
            if state_machine.has_data_to_send():
                await conn.sendall(state_machine.get_data_to_send())

    But this can be problematic. If you're using h2 then *usually* reading
    incoming data doesn't create the need to send any data, so we don't want
    to force every task that tries to read from the network to sit and wait
    a potentially long time for ``sendall`` to finish. And in some situations
    this could even potentially cause a deadlock, if the remote peer is
    waiting for you to read some data before it accepts the data you're
    sending.

    :class:`StrictFIFOLock` provides an alternative. We can rewrite our
    example like::

        # Note: no awaits between when we start using the state machine and
        # when we block to take the lock!
        state_machine.do_something()
        if state_machine.has_data_to_send():
            # Notice that we fetch the data to send out of the state machine
            # *before* sleeping, so that other tasks won't see it.
            chunk = state_machine.get_data_to_send()
            async with strict_fifo_lock:
                await conn.sendall(chunk)

    First we do all our interaction with the state machine in a single
    scheduling quantum (notice there are no ``await``\s in there), so it's
    automatically atomic with respect to other tasks. And then if and only if
    we have data to send, we get in line to send it – and
    :class:`StrictFIFOLock` guarantees that each task will send its data in
    the same order that the state machine generated it.

    Currently, :class:`StrictFIFOLock` is identical to :class:`Lock`,
    but (a) this may not always be true in the future, especially if Trio ever
    implements `more sophisticated scheduling policies
    <https://github.com/python-trio/trio/issues/32>`__, and (b) the above code
    is relying on a pretty subtle property of its lock. Using a
    :class:`StrictFIFOLock` acts as an executable reminder that you're relying
    on this property.

    """


@attr.s(frozen=True)
class _ConditionStatistics:
    tasks_waiting = attr.ib()
    lock_statistics = attr.ib()


class Condition(AsyncContextManagerMixin, metaclass=Final):
    """A classic `condition variable
    <https://en.wikipedia.org/wiki/Monitor_(synchronization)>`__, similar to
    :class:`threading.Condition`.

    A :class:`Condition` object can be used as an async context manager to
    acquire the underlying lock; it blocks on entry but not on exit.

    Args:
      lock (Lock): the lock object to use. If given, must be a
          :class:`trio.Lock`. If None, a new :class:`Lock` will be allocated
          and used.

    """

    def __init__(self, lock=None):
        if lock is None:
            lock = Lock()
        if type(lock) is not Lock:
            raise TypeError("lock must be a trio.Lock")
        self._lock = lock
        self._lot = trio.lowlevel.ParkingLot()

    def locked(self):
        """Check whether the underlying lock is currently held.

        Returns:
          bool: True if the lock is held, False otherwise.

        """
        return self._lock.locked()

    def acquire_nowait(self):
        """Attempt to acquire the underlying lock, without blocking.

        Raises:
          WouldBlock: if the lock is currently held.

        """
        return self._lock.acquire_nowait()

    async def acquire(self):
        """Acquire the underlying lock, blocking if necessary."""
        await self._lock.acquire()

    def release(self):
        """Release the underlying lock."""
        self._lock.release()

    @enable_ki_protection
    async def wait(self):
        """Wait for another task to call :meth:`notify` or
        :meth:`notify_all`.

        When calling this method, you must hold the lock. It releases the lock
        while waiting, and then re-acquires it before waking up.

        There is a subtlety with how this method interacts with cancellation:
        when cancelled it will block to re-acquire the lock before raising
        :exc:`Cancelled`. This may cause cancellation to be less prompt than
        expected. The advantage is that it makes code like this work::

           async with condition:
               await condition.wait()

        If we didn't re-acquire the lock before waking up, and :meth:`wait`
        were cancelled here, then we'd crash in ``condition.__aexit__`` when
        we tried to release the lock we no longer held.

        Raises:
          RuntimeError: if the calling task does not hold the lock.

        """
        if trio.lowlevel.current_task() is not self._lock._owner:
            raise RuntimeError("must hold the lock to wait")
        self.release()
        # NOTE: we go to sleep on self._lot, but we'll wake up on
        # self._lock._lot -- notify() reparks us there, and Lock.release()
        # then hands the lock to us directly when we're woken.
        try:
            await self._lot.park()
        except:
            with trio.CancelScope(shield=True):
                await self.acquire()
            raise

    def notify(self, n=1):
        """Wake one or more tasks that are blocked in :meth:`wait`.

        Args:
          n (int): The number of tasks to wake.

        Raises:
          RuntimeError: if the calling task does not hold the lock.

        """
        if trio.lowlevel.current_task() is not self._lock._owner:
            raise RuntimeError("must hold the lock to notify")
        self._lot.repark(self._lock._lot, count=n)

    def notify_all(self):
        """Wake all tasks that are currently blocked in :meth:`wait`.

        Raises:
          RuntimeError: if the calling task does not hold the lock.

        """
        if trio.lowlevel.current_task() is not self._lock._owner:
            raise RuntimeError("must hold the lock to notify")
        self._lot.repark_all(self._lock._lot)

    def statistics(self):
        r"""Return an object containing debugging information.

        Currently the following fields are defined:

        * ``tasks_waiting``: The number of tasks blocked on this condition's
          :meth:`wait` method.
        * ``lock_statistics``: The result of calling the underlying
          :class:`Lock`\s  :meth:`~Lock.statistics` method.

        """
        return _ConditionStatistics(
            tasks_waiting=len(self._lot), lock_statistics=self._lock.statistics()
        )
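

# Usage sketch for Condition (illustrative; names are hypothetical): a consumer
# waits until a producer appends an item and calls notify().
async def _example_condition_usage():
    cond = Condition()
    items = []

    async def consumer():
        async with cond:
            while not items:
                await cond.wait()  # releases the lock while parked
            print("got", items.pop())

    async def producer():
        async with cond:
            items.append("hello")
            cond.notify()  # wakes one waiting consumer

    async with trio.open_nursery() as nursery:
        nursery.start_soon(consumer)
        await trio.sleep(0.1)  # give the consumer time to start waiting
        nursery.start_soon(producer)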