import math

import torch
import torch.distributed as dist


class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for distributed,
    with repeated augmentation.
    It ensures that each augmented version of a sample will be visible to a
    different process (GPU).
    Heavily based on 'torch.utils.data.DistributedSampler'.

    This is borrowed from the DeiT Repo:
    https://github.com/facebookresearch/deit/blob/main/samplers.py
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0, repetitions=3):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available!")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Per-process count of repeated indices before truncation.
        self.num_samples = int(math.ceil(len(self.dataset) * float(repetitions) / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # Per-process epoch length: the dataset size floored to a multiple of 256
        # and split across replicas, so despite the repetition an epoch yields
        # roughly as many samples per process as a non-repeated DistributedSampler.
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle
        self.seed = seed
        self.repetitions = repetitions

    def __iter__(self):
        if self.shuffle:
            # Deterministically shuffle based on epoch
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # Repeat each index `repetitions` times consecutively, then pad with extra
        # samples so the list divides evenly across processes; the strided
        # subsampling below routes the copies of a sample to different processes.
        indices = [ele for ele in indices for i in range(self.repetitions)]
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # Subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        # Truncate to the selected per-epoch length.
        return iter(indices[: self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
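
# ---------------------------------------------------------------------------
# Usage sketch (not from the original file): a minimal, single-process example
# of wiring RASampler into a DataLoader. In real DDP training, num_replicas
# and rank would come from the initialized process group (and could be left as
# None); here they are passed explicitly so the example runs standalone, and
# TensorDataset stands in for a real augmented dataset. With multiple
# processes, the consecutive copies of each sample land on different ranks.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(1024).float())
    sampler = RASampler(dataset, num_replicas=1, rank=0, shuffle=True, repetitions=3)
    loader = DataLoader(dataset, batch_size=256, sampler=sampler)

    for epoch in range(2):
        # Re-seed the epoch shuffle, exactly as with DistributedSampler.
        sampler.set_epoch(epoch)
        for (batch,) in loader:
            pass  # a training step would consume `batch` here

    print(f"samples per process per epoch: {len(sampler)}")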