# presets.py — classification transform presets (torchvision reference scripts).
  1. import torch
  2. from torchvision.transforms.functional import InterpolationMode
  3. def get_module(use_v2):
  4. # We need a protected import to avoid the V2 warning in case just V1 is used
  5. if use_v2:
  6. import torchvision.transforms.v2
  7. return torchvision.transforms.v2
  8. else:
  9. import torchvision.transforms
  10. return torchvision.transforms
  11. class ClassificationPresetTrain:
  12. # Note: this transform assumes that the input to forward() are always PIL
  13. # images, regardless of the backend parameter. We may change that in the
  14. # future though, if we change the output type from the dataset.
  15. def __init__(
  16. self,
  17. *,
  18. crop_size,
  19. mean=(0.485, 0.456, 0.406),
  20. std=(0.229, 0.224, 0.225),
  21. interpolation=InterpolationMode.BILINEAR,
  22. hflip_prob=0.5,
  23. auto_augment_policy=None,
  24. ra_magnitude=9,
  25. augmix_severity=3,
  26. random_erase_prob=0.0,
  27. backend="pil",
  28. use_v2=False,
  29. ):
  30. T = get_module(use_v2)
  31. transforms = []
  32. backend = backend.lower()
  33. if backend == "tensor":
  34. transforms.append(T.PILToTensor())
  35. elif backend != "pil":
  36. raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
  37. transforms.append(T.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
  38. if hflip_prob > 0:
  39. transforms.append(T.RandomHorizontalFlip(hflip_prob))
  40. if auto_augment_policy is not None:
  41. if auto_augment_policy == "ra":
  42. transforms.append(T.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
  43. elif auto_augment_policy == "ta_wide":
  44. transforms.append(T.TrivialAugmentWide(interpolation=interpolation))
  45. elif auto_augment_policy == "augmix":
  46. transforms.append(T.AugMix(interpolation=interpolation, severity=augmix_severity))
  47. else:
  48. aa_policy = T.AutoAugmentPolicy(auto_augment_policy)
  49. transforms.append(T.AutoAugment(policy=aa_policy, interpolation=interpolation))
  50. if backend == "pil":
  51. transforms.append(T.PILToTensor())
  52. transforms.extend(
  53. [
  54. T.ToDtype(torch.float, scale=True) if use_v2 else T.ConvertImageDtype(torch.float),
  55. T.Normalize(mean=mean, std=std),
  56. ]
  57. )
  58. if random_erase_prob > 0:
  59. transforms.append(T.RandomErasing(p=random_erase_prob))
  60. if use_v2:
  61. transforms.append(T.ToPureTensor())
  62. self.transforms = T.Compose(transforms)
  63. def __call__(self, img):
  64. return self.transforms(img)
  65. class ClassificationPresetEval:
  66. def __init__(
  67. self,
  68. *,
  69. crop_size,
  70. resize_size=256,
  71. mean=(0.485, 0.456, 0.406),
  72. std=(0.229, 0.224, 0.225),
  73. interpolation=InterpolationMode.BILINEAR,
  74. backend="pil",
  75. use_v2=False,
  76. ):
  77. T = get_module(use_v2)
  78. transforms = []
  79. backend = backend.lower()
  80. if backend == "tensor":
  81. transforms.append(T.PILToTensor())
  82. elif backend != "pil":
  83. raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
  84. transforms += [
  85. T.Resize(resize_size, interpolation=interpolation, antialias=True),
  86. T.CenterCrop(crop_size),
  87. ]
  88. if backend == "pil":
  89. transforms.append(T.PILToTensor())
  90. transforms += [
  91. T.ToDtype(torch.float, scale=True) if use_v2 else T.ConvertImageDtype(torch.float),
  92. T.Normalize(mean=mean, std=std),
  93. ]
  94. if use_v2:
  95. transforms.append(T.ToPureTensor())
  96. self.transforms = T.Compose(transforms)
  97. def __call__(self, img):
  98. return self.transforms(img)