import bisect
import math
import warnings
from typing import (
    Dict,
    Generic,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)

from torch import default_generator, randperm
from torch._utils import _accumulate

from ... import Generator, Tensor

__all__ = [
    "Dataset",
    "IterableDataset",
    "TensorDataset",
    "StackDataset",
    "ConcatDataset",
    "ChainDataset",
    "Subset",
    "random_split",
]

T_co = TypeVar("T_co", covariant=True)
T = TypeVar("T")
T_dict = Dict[str, T_co]
T_tuple = Tuple[T_co, ...]
T_stack = TypeVar("T_stack", T_tuple, T_dict)


class Dataset(Generic[T_co]):
    r"""An abstract class representing a :class:`Dataset`.

    All datasets that represent a map from keys to data samples should subclass
    it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
    data sample for a given key. Subclasses could also optionally overwrite
    :meth:`__len__`, which is expected to return the size of the dataset by many
    :class:`~torch.utils.data.Sampler` implementations and the default options
    of :class:`~torch.utils.data.DataLoader`. Subclasses could also
    optionally implement :meth:`__getitems__`, to speed up batched sample
    loading. This method accepts a list of sample indices for a batch and returns
    the list of corresponding samples.

    .. note::
      :class:`~torch.utils.data.DataLoader` by default constructs an index
      sampler that yields integral indices.  To make it work with a map-style
      dataset with non-integral indices/keys, a custom sampler must be provided.
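
    Example (an illustrative sketch only; ``SquaresDataset`` is a hypothetical
    subclass used as a usage example, not part of this module)::

        >>> # xdoctest: +SKIP
        >>> class SquaresDataset(Dataset):
        ...     def __init__(self, n):
        ...         self.n = n
        ...     def __getitem__(self, index):
        ...         return index ** 2
        ...     def __len__(self):
        ...         return self.n
        >>> SquaresDataset(5)[3]
        9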
    """

    def __getitem__(self, index) -> T_co:
        raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")

    def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]":
        return ConcatDataset([self, other])


class IterableDataset(Dataset[T_co], Iterable[T_co]):
    r"""An iterable Dataset.

    All datasets that represent an iterable of data samples should subclass it.
    Such a form of dataset is particularly useful when data come from a stream.

    All subclasses should overwrite :meth:`__iter__`, which would return an
    iterator of samples in this dataset.

    When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
    item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
    iterator. When :attr:`num_workers > 0`, each worker process will have a
    different copy of the dataset object, so it is often desired to configure
    each copy independently to avoid having duplicate data returned from the
    workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
    process, returns information about the worker. It can be used in either the
    dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
    :attr:`worker_init_fn` option to modify each copy's behavior.

    Example 1: splitting workload across all workers in :meth:`__iter__`::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
        >>> # xdoctest: +SKIP("Fails on MacOS12")
        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         worker_info = torch.utils.data.get_worker_info()
        ...         if worker_info is None:  # single-process data loading, return the full iterator
        ...             iter_start = self.start
        ...             iter_end = self.end
        ...         else:  # in a worker process
        ...             # split workload
        ...             per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
        ...             worker_id = worker_info.id
        ...             iter_start = self.start + worker_id * per_worker
        ...             iter_end = min(iter_start + per_worker, self.end)
        ...         return iter(range(iter_start, iter_end))
        ...
        >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [tensor([3]), tensor([4]), tensor([5]), tensor([6])]

        >>> # xdoctest: +REQUIRES(POSIX)
        >>> # Multi-process loading with two worker processes
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> # xdoctest: +IGNORE_WANT("non deterministic")
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [tensor([3]), tensor([5]), tensor([4]), tensor([6])]

        >>> # With even more workers
        >>> # xdoctest: +IGNORE_WANT("non deterministic")
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12)))
        [tensor([3]), tensor([5]), tensor([4]), tensor([6])]

    Example 2: splitting workload across all workers using :attr:`worker_init_fn`::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         return iter(range(self.start, self.end))
        ...
        >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [3, 4, 5, 6]
        >>>
        >>> # Directly doing multi-process loading yields duplicate data
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [3, 3, 4, 4, 5, 5, 6, 6]

        >>> # Define a `worker_init_fn` that configures each dataset copy differently
        >>> def worker_init_fn(worker_id):
        ...     worker_info = torch.utils.data.get_worker_info()
        ...     dataset = worker_info.dataset  # the dataset copy in this worker process
        ...     overall_start = dataset.start
        ...     overall_end = dataset.end
        ...     # configure the dataset to only process the split workload
        ...     per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
        ...     worker_id = worker_info.id
        ...     dataset.start = overall_start + worker_id * per_worker
        ...     dataset.end = min(dataset.start + per_worker, overall_end)
        ...

        >>> # Multi-process loading with the custom `worker_init_fn`
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
        [3, 5, 4, 6]

        >>> # With even more workers
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn)))
        [3, 4, 5, 6]
    """

    def __add__(self, other: Dataset[T_co]):
        return ChainDataset([self, other])


class TensorDataset(Dataset[Tuple[Tensor, ...]]):
    r"""Dataset wrapping tensors.

    Each sample will be retrieved by indexing tensors along the first dimension.

    Args:
        *tensors (Tensor): tensors that have the same size in the first dimension.
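
    Example (an illustrative usage sketch; the tensors below are arbitrary placeholders)::

        >>> # xdoctest: +SKIP
        >>> features = torch.randn(4, 3)
        >>> labels = torch.tensor([0, 1, 0, 1])
        >>> ds = TensorDataset(features, labels)
        >>> sample = ds[2]  # == (features[2], labels[2])
        >>> len(ds)
        4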
    """

    tensors: Tuple[Tensor, ...]

    def __init__(self, *tensors: Tensor) -> None:
        assert all(
            tensors[0].size(0) == tensor.size(0) for tensor in tensors
        ), "Size mismatch between tensors"
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)


class StackDataset(Dataset[T_stack]):
    r"""Dataset as a stacking of multiple datasets.

    This class is useful to assemble different parts of complex input data, given as datasets.

    Example:
        >>> # xdoctest: +SKIP
        >>> images = ImageDataset()
        >>> texts = TextDataset()
        >>> tuple_stack = StackDataset(images, texts)
        >>> tuple_stack[0] == (images[0], texts[0])
        >>> dict_stack = StackDataset(image=images, text=texts)
        >>> dict_stack[0] == {'image': images[0], 'text': texts[0]}

    Args:
        *args (Dataset): Datasets for stacking returned as tuple.
        **kwargs (Dataset): Datasets for stacking returned as dict.
    datasetsargskwargsr   Nc                    s   |r#|rt dt|d  _t fdd|D rt d| _d S |rFt| }t|d  _t fdd|D rAt d| _d S t d)NztSupported either ``tuple``- (via ``args``) or``dict``- (via ``kwargs``) like input/output, but both types are given.r   c                 3       | ]
} j t|kV  qd S r)   _lengthlenr4   datasetr>   r"   r#   r6          z(StackDataset.__init__.<locals>.<genexpr>zSize mismatch between datasetsc                 3   rD   r)   rE   rH   r>   r"   r#   r6      rJ   z%At least one dataset should be passed)
ValueErrorrG   rF   anyrA   listvalues)r    rB   rC   tmpr"   r>   r#   r8      s   

zStackDataset.__init__c                    s<   t | jtr fdd| j D S t fdd| jD S )Nc                    s   i | ]	\}}||  qS r"   r"   )r4   krI   r:   r"   r#   
<dictcomp>   s    z,StackDataset.__getitem__.<locals>.<dictcomp>c                 3   r9   r)   r"   rH   r:   r"   r#   r6      r;   z+StackDataset.__getitem__.<locals>.<genexpr>)
isinstancerA   dictitemsr<   r   r"   r:   r#   r$      s   zStackDataset.__getitem__indicesc                 C   sj  t | jtr[dd |D }| j D ]F\}}tt|dd rH||}t|t|kr9tdt| dt| t	||D ]\}}|||< q>qt	||D ]
\}}|| ||< qMq|S dd |D }	| jD ]F}tt|dd r||}t|t|krtdt| dt| t	||	D ]	\}}
|

| qqet	||	D ]\}}
|

||  qqedd |	D }|S )Nc                 S   s   g | ]}i qS r"   r"   r4   _r"   r"   r#   
<listcomp>      z-StackDataset.__getitems__.<locals>.<listcomp>__getitems__z0Nested dataset's output size mismatch. Expected z, got c                 S   s   g | ]}g qS r"   r"   rV   r"   r"   r#   rX     rY   c                 S   s   g | ]}t |qS r"   )r<   )r4   sampler"   r"   r#   rX     s    )rR   rA   rS   rT   callablegetattrrZ   rG   rK   zipappend)r    rU   Z
dict_batchrP   rI   rT   dataZd_sampleidxZ
list_batchZt_sampleZtuple_batchr"   r"   r#   rZ      sH   



zStackDataset.__getitems__c                 C   s   | j S r)   )rF   r>   r"   r"   r#   r?   !  s   zStackDataset.__len__)r-   r.   r/   r0   r	   r<   rS   r@   r   r   r8   r$   rM   rZ   r?   r"   r"   r"   r#   r      s   
 !r   c                       st   e Zd ZU dZeee  ed< ee ed< e	dd Z
dee ddf fdd	Zd
d Zdd Zedd Z  ZS )r   zDataset as a concatenation of multiple datasets.

    This class is useful to assemble different existing datasets.

    Args:
        datasets (sequence): List of datasets to be concatenated
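
    Example (an illustrative usage sketch; ``ds_a`` and ``ds_b`` are arbitrary
    placeholder datasets)::

        >>> # xdoctest: +SKIP
        >>> ds_a = TensorDataset(torch.arange(3))
        >>> ds_b = TensorDataset(torch.arange(5))
        >>> combined = ConcatDataset([ds_a, ds_b])
        >>> len(combined)
        8
        >>> sample = combined[3]  # first element of ds_b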
    """

    datasets: List[Dataset[T_co]]
    cumulative_sizes: List[int]

    @staticmethod
    def cumsum(sequence):
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = list(datasets)
        assert len(self.datasets) > 0, "datasets should not be an empty iterable"
        for d in self.datasets:
            assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    @property
    def cummulative_sizes(self):
        warnings.warn(
            "cummulative_sizes attribute is renamed to cumulative_sizes",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.cumulative_sizes


class ChainDataset(IterableDataset):
    r"""Dataset for chaining multiple :class:`IterableDataset` s.

    This class is useful to assemble different existing dataset streams. The
    chaining operation is done on-the-fly, so concatenating large-scale
    datasets with this class will be efficient.

    Args:
        datasets (iterable of IterableDataset): datasets to be chained together
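
    Example (an illustrative usage sketch; ``stream_a`` and ``stream_b`` stand in
    for any :class:`IterableDataset` instances)::

        >>> # xdoctest: +SKIP
        >>> chained = ChainDataset([stream_a, stream_b])
        >>> for sample in chained:  # yields all of stream_a, then all of stream_b
        ...     pass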
    """

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = datasets

    def __iter__(self):
        for d in self.datasets:
            assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
            yield from d

    def __len__(self):
        total = 0
        for d in self.datasets:
            assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
            total += len(d)
        return total


class Subset(Dataset[T_co]):
    r"""
    Subset of a dataset at specified indices.

    Args:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
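
    Example (an illustrative usage sketch; ``base`` is an arbitrary placeholder dataset)::

        >>> # xdoctest: +SKIP
        >>> base = TensorDataset(torch.arange(10))
        >>> evens = Subset(base, indices=[0, 2, 4, 6, 8])
        >>> len(evens)
        5
        >>> sample = evens[1]  # == base[2]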
    """

    dataset: Dataset[T_co]
    indices: Sequence[int]

    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        if isinstance(idx, list):
            return self.dataset[[self.indices[i] for i in idx]]
        return self.dataset[self.indices[idx]]

    def __getitems__(self, indices: List[int]) -> List[T_co]:
        # Prefer the wrapped dataset's batched fetching when it is available.
        if callable(getattr(self.dataset, "__getitems__", None)):
            return self.dataset.__getitems__([self.indices[idx] for idx in indices])
        return [self.dataset[self.indices[idx]] for idx in indices]

    def __len__(self):
        return len(self.indices)


def random_split(
    dataset: Dataset[T],
    lengths: Sequence[Union[int, float]],
    generator: Optional[Generator] = default_generator,
) -> List[Subset[T]]:
    r"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    If a list of fractions that sum up to 1 is given,
    the lengths will be computed automatically as
    floor(frac * len(dataset)) for each fraction provided.

    After computing the lengths, if there are any remainders, 1 count will be
    distributed in round-robin fashion to the lengths
    until there are no remainders left.

    Optionally fix the generator for reproducible results, e.g.:

    Example:
        >>> # xdoctest: +SKIP
        >>> generator1 = torch.Generator().manual_seed(42)
        >>> generator2 = torch.Generator().manual_seed(42)
        >>> random_split(range(10), [3, 7], generator=generator1)
        >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2)

    Args:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths or fractions of splits to be produced
        generator (Generator): Generator used for the random permutation.
    """
    if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
        subset_lengths: List[int] = []
        for i, frac in enumerate(lengths):
            if frac < 0 or frac > 1:
                raise ValueError(f"Fraction at index {i} is not between 0 and 1")
            n_items_in_split = int(math.floor(len(dataset) * frac))
            subset_lengths.append(n_items_in_split)
        remainder = len(dataset) - sum(subset_lengths)
        # Add 1 to the lengths in round-robin fashion until the remainder is exhausted.
        for i in range(remainder):
            idx_to_add_at = i % len(subset_lengths)
            subset_lengths[idx_to_add_at] += 1
        lengths = subset_lengths
        for i, length in enumerate(lengths):
            if length == 0:
                warnings.warn(
                    f"Length of split at index {i} is 0. "
                    "This might result in an empty dataset."
                )

    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")

    indices = randperm(sum(lengths), generator=generator).tolist()
    return [
        Subset(dataset, indices[offset - length : offset])
        for offset, length in zip(_accumulate(lengths), lengths)
    ]