
[Refactor] Refactor transform's compose and arranges (#111)

* Remove arrange op load

* Update transform pipeline

* Update tests

* Update tutorials

* Bugfix

* -

* Fix and update indices

* Fix bugs and typo

* Update requirements.txt

---------

Co-authored-by: Bobholamovic <mhlin425@whu.edu.cn>
Yizhou Chen, 2 years ago
parent commit 740bba17a4
51 changed files with 343 additions and 471 deletions
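At a glance, this refactor moves the task-specific Arrange step out of user-facing transform definitions: datasets and trainers now accept a plain list of operators, prepend DecodeImg when it is missing, and bind the matching Arrange op themselves. A hedged before/after sketch (operator choices mirror the tutorial diffs below; treat it as illustrative usage, not the only supported form):

import paddlers.transforms as T

# Before this PR: the user-built pipeline had to include decoding and arranging.
old_style = T.Compose([
    T.DecodeImg(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    # ...plus a task-specific Arrange op such as ArrangeChangeDetector('train')
])

# After this PR: a bare list is enough; DecodeImg is prepended automatically
# and the appropriate Arrange op is appended by the dataset/trainer.
new_style = [T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]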
  1. paddlers/datasets/base.py (+11 -1)
  2. paddlers/datasets/cd_dataset.py (+1 -1)
  3. paddlers/datasets/clas_dataset.py (+1 -1)
  4. paddlers/datasets/coco.py (+1 -1)
  5. paddlers/datasets/res_dataset.py (+1 -1)
  6. paddlers/datasets/seg_dataset.py (+1 -1)
  7. paddlers/datasets/voc.py (+2 -2)
  8. paddlers/tasks/base.py (+37 -16)
  9. paddlers/tasks/change_detector.py (+15 -9)
  10. paddlers/tasks/classifier.py (+14 -7)
  11. paddlers/tasks/object_detector.py (+13 -7)
  12. paddlers/tasks/restorer.py (+11 -7)
  13. paddlers/tasks/segmenter.py (+12 -7)
  14. paddlers/tasks/utils/slider_predict.py (+1 -1)
  15. paddlers/transforms/indices.py (+24 -21)
  16. paddlers/transforms/operators.py (+2 -17)
  17. requirements.txt (+2 -2)
  18. tests/deploy/test_predictor.py (+5 -20)
  19. tests/tasks/test_slider_predict.py (+2 -8)
  20. tests/transforms/test_indices.py (+1 -0)
  21. tutorials/train/change_detection/bit.py (+6 -11)
  22. tutorials/train/change_detection/cdnet.py (+6 -11)
  23. tutorials/train/change_detection/changeformer.py (+6 -11)
  24. tutorials/train/change_detection/dsamnet.py (+6 -11)
  25. tutorials/train/change_detection/dsifn.py (+6 -11)
  26. tutorials/train/change_detection/fc_ef.py (+6 -11)
  27. tutorials/train/change_detection/fc_siam_conc.py (+6 -11)
  28. tutorials/train/change_detection/fc_siam_diff.py (+6 -11)
  29. tutorials/train/change_detection/fccdn.py (+6 -11)
  30. tutorials/train/change_detection/p2v.py (+6 -11)
  31. tutorials/train/change_detection/snunet.py (+6 -11)
  32. tutorials/train/change_detection/stanet.py (+6 -11)
  33. tutorials/train/classification/condensenetv2.py (+6 -11)
  34. tutorials/train/classification/hrnet.py (+6 -11)
  35. tutorials/train/classification/mobilenetv3.py (+6 -11)
  36. tutorials/train/classification/resnet50_vd.py (+6 -11)
  37. tutorials/train/image_restoration/drn.py (+6 -11)
  38. tutorials/train/image_restoration/esrgan.py (+6 -11)
  39. tutorials/train/image_restoration/lesrcnn.py (+6 -11)
  40. tutorials/train/object_detection/faster_rcnn.py (+6 -11)
  41. tutorials/train/object_detection/ppyolo.py (+6 -11)
  42. tutorials/train/object_detection/ppyolo_tiny.py (+6 -11)
  43. tutorials/train/object_detection/ppyolov2.py (+6 -11)
  44. tutorials/train/object_detection/yolov3.py (+6 -11)
  45. tutorials/train/semantic_segmentation/bisenetv2.py (+6 -11)
  46. tutorials/train/semantic_segmentation/deeplabv3p.py (+6 -11)
  47. tutorials/train/semantic_segmentation/factseg.py (+6 -11)
  48. tutorials/train/semantic_segmentation/farseg.py (+6 -11)
  49. tutorials/train/semantic_segmentation/fast_scnn.py (+6 -11)
  50. tutorials/train/semantic_segmentation/hrnet.py (+6 -11)
  51. tutorials/train/semantic_segmentation/unet.py (+6 -11)

+ 11 - 1
paddlers/datasets/base.py

@@ -18,7 +18,7 @@ from paddle.io import Dataset
 from paddle.fluid.dataloader.collate import default_collate_fn
 
 from paddlers.utils import get_num_workers
-from paddlers.transforms import construct_sample_from_dict
+from paddlers.transforms import construct_sample_from_dict, Compose, DecodeImg
 
 
 class BaseDataset(Dataset):
@@ -30,6 +30,8 @@ class BaseDataset(Dataset):
         self.data_dir = data_dir
         self.label_list = label_list
         self.transforms = deepcopy(transforms)
+        self.normal_transforms(self.transforms)
+
         self.num_workers = get_num_workers(num_workers)
         self.shuffle = shuffle
 
@@ -46,3 +48,11 @@ class BaseDataset(Dataset):
                 [s[0] for s in batch]), [s[1] for s in batch]
         else:
             return default_collate_fn([s[0] for s in batch])
+
+    def normal_transforms(self, trans):
+        # NOTE: add `DecodeImg` and convert to `Compose`
+        if isinstance(trans, list):
+            trans = Compose(trans)
+        if not isinstance(trans.transforms[0], DecodeImg):
+            trans.transforms.insert(0, DecodeImg())
+        self.transforms = trans
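As a concrete illustration of the new `normal_transforms` hook, a minimal sketch (dataset choice, keyword names, and paths are illustrative, following the docstrings patched below):

import paddlers as pdrs
import paddlers.transforms as T

# Passing a plain list is now accepted; BaseDataset.normal_transforms() wraps it
# in Compose and inserts DecodeImg at position 0 when it is missing.
train_dataset = pdrs.datasets.SegDataset(
    data_dir="data/my_dataset",               # illustrative path
    file_list="data/my_dataset/train.txt",    # illustrative path
    label_list="data/my_dataset/labels.txt",  # illustrative path
    transforms=[T.Normalize(mean=[0.5] * 3, std=[0.5] * 3)],
    shuffle=True)
# train_dataset.transforms is then a Compose whose first op is DecodeImg.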

+ 1 - 1
paddlers/datasets/cd_dataset.py

@@ -31,7 +31,7 @@ class CDDataset(BaseDataset):
             the change mask. When `with_seg_labels` is True, each line in the file contains the paths of the
             bi-temporal images, the path of the change mask, and the paths of the segmentation masks in both
             temporal phases.
-        transforms (paddlers.transforms.Compose): Data preprocessing and data augmentation operators to apply.
+        transforms (paddlers.transforms.Compose|list): Data preprocessing and data augmentation operators to apply.
         label_list (str|None, optional): Path of the file that contains the category names. Defaults to None.
         num_workers (int|str, optional): Number of processes used for data loading. If `num_workers` is 'auto',
             the number of workers will be automatically determined according to the number of CPU cores: If

+ 1 - 1
paddlers/datasets/clas_dataset.py

@@ -25,7 +25,7 @@ class ClasDataset(BaseDataset):
     Args:
         data_dir (str): Root directory of the dataset.
         file_list (str): Path of the file that contains relative paths of images and labels.
-        transforms (paddlers.transforms.Compose): Data preprocessing and data augmentation operators to apply.
+        transforms (paddlers.transforms.Compose|list): Data preprocessing and data augmentation operators to apply.
         label_list (str|None, optional): Path of the file that contains the category names. Defaults to None.
         num_workers (int|str, optional): Number of processes used for data loading. If `num_workers` is 'auto',
             the number of workers will be automatically determined according to the number of CPU cores: If

+ 1 - 1
paddlers/datasets/coco.py

@@ -35,7 +35,7 @@ class COCODetDataset(BaseDataset):
         data_dir (str): Root directory of the dataset.
         image_dir (str): Directory that contains the images.
         ann_path (str): Path to COCO annotations.
-        transforms (paddlers.transforms.Compose): Data preprocessing and data augmentation operators to apply.
+        transforms (paddlers.transforms.Compose|list): Data preprocessing and data augmentation operators to apply.
         label_list (str|None, optional): Path of the file that contains the category names. Defaults to None.
         num_workers (int|str, optional): Number of processes used for data loading. If `num_workers` is 'auto',
             the number of workers will be automatically determined according to the number of CPU cores: If

+ 1 - 1
paddlers/datasets/res_dataset.py

@@ -26,7 +26,7 @@ class ResDataset(BaseDataset):
     Args:
         data_dir (str): Root directory of the dataset.
         file_list (str): Path of the file that contains relative paths of source and target image files.
-        transforms (paddlers.transforms.Compose): Data preprocessing and data augmentation operators to apply.
+        transforms (paddlers.transforms.Compose|list): Data preprocessing and data augmentation operators to apply.
         num_workers (int|str, optional): Number of processes used for data loading. If `num_workers` is 'auto',
             the number of workers will be automatically determined according to the number of CPU cores: If
             there are more than 16 cores, 8 workers will be used. Otherwise, the number of workers will be half

+ 1 - 1
paddlers/datasets/seg_dataset.py

@@ -26,7 +26,7 @@ class SegDataset(BaseDataset):
     Args:
         data_dir (str): Root directory of the dataset.
         file_list (str): Path of the file that contains relative paths of images and annotation files.
-        transforms (paddlers.transforms.Compose): Data preprocessing and data augmentation operators to apply.
+        transforms (paddlers.transforms.Compose|list): Data preprocessing and data augmentation operators to apply.
         label_list (str|None, optional): Path of the file that contains the category names. Defaults to None.
         num_workers (int|str, optional): Number of processes used for data loading. If `num_workers` is 'auto',
             the number of workers will be automatically determined according to the number of CPU cores: If

+ 2 - 2
paddlers/datasets/voc.py

@@ -36,11 +36,11 @@ class VOCDetDataset(BaseDataset):
     Args:
         data_dir (str): Root directory of the dataset.
         file_list (str): Path of the file that contains relative paths of images and annotation files.
-        transforms (paddlers.transforms.Compose): Data preprocessing and data augmentation operators to apply.
+        transforms (paddlers.transforms.Compose|list): Data preprocessing and data augmentation operators to apply.
         label_list (str|None, optional): Path of the file that contains the category names. Defaults to None.
         num_workers (int|str, optional): Number of processes used for data loading. If `num_workers` is 'auto',
             the number of workers will be automatically determined according to the number of CPU cores: If
-            there are more than 16 cores8 workers will be used. Otherwise, the number of workers will be half
+            there are more than 16 cores, 8 workers will be used. Otherwise, the number of workers will be half
             the number of CPU cores. Defaults: 'auto'.
         shuffle (bool, optional): Whether to shuffle the samples. Defaults to False.
         allow_empty (bool, optional): Whether to add negative samples. Defaults to False.

+ 37 - 16
paddlers/tasks/base.py

@@ -20,6 +20,7 @@ import math
 import json
 from functools import partial, wraps
 from inspect import signature
+from typing import Optional
 
 import yaml
 import paddle
@@ -29,6 +30,7 @@ from paddleslim.analysis import flops
 from paddleslim import L1NormFilterPruner, FPGMFilterPruner
 
 import paddlers
+from paddlers.transforms.operators import Compose, DecodeImg, Arrange
 import paddlers.utils.logging as logging
 from paddlers.utils import (
     seconds_to_hms, get_single_card_bs, dict2str, get_pretrain_weights,
@@ -61,7 +63,7 @@ class ModelMeta(type):
 
 
 class BaseModel(metaclass=ModelMeta):
-
+    _arrange: Optional[Arrange] = None
     find_unused_parameters = False
 
     def __init__(self, model_type):
@@ -204,13 +206,11 @@ class BaseModel(metaclass=ModelMeta):
                     else:
                         attr = op.__dict__
                     info['Transforms'].append({name: attr})
-                arrange = self.test_transforms.arrange
-                if arrange is not None:
-                    info['Transforms'].append({
-                        arrange.__class__.__name__: {
-                            'mode': 'test'
-                        }
-                    })
+                info['Transforms'].append({
+                    self._arrange.__name__: {
+                        'mode': 'test'
+                    }
+                })
         info['completed_epochs'] = self.completed_epochs
         return info
 
@@ -267,11 +267,28 @@ class BaseModel(metaclass=ModelMeta):
         open(osp.join(save_dir, '.success'), 'w').close()
         logging.info("Model saved in {}.".format(save_dir))
 
+    def _build_transforms(self, trans, mode):
+        if isinstance(trans, list):
+            trans = Compose(trans)
+        if not isinstance(trans.transforms[0], DecodeImg):
+            trans.transforms.insert(0, DecodeImg())
+        if self._arrange is Arrange or not issubclass(self._arrange, Arrange):
+            raise ValueError(
+                "`self._arrange` must be set to a concrete Arrange type.")
+        if trans.arrange is None:
+            # For backward compatibility, we only set `trans.arrange`
+            # when it is not set by user.
+            trans.arrange = self._arrange(mode)
+        return trans
+
     def build_data_loader(self,
                           dataset,
                           batch_size,
                           mode='train',
                           collate_fn=None):
+        # NOTE: Append `Arrange` to transforms
+        dataset.transforms = self._build_transforms(dataset.transforms, mode)
+
         if dataset.num_samples < batch_size:
             raise ValueError(
                 'The volume of dataset({}) must be larger than batch size({}).'
@@ -315,7 +332,7 @@ class BaseModel(metaclass=ModelMeta):
                    early_stop=False,
                    early_stop_patience=5,
                    use_vdl=True):
-        self._check_transforms(train_dataset.transforms, 'train')
+        self._check_transforms(train_dataset.transforms)
 
         # XXX: Hard-coding
         if self.model_type == 'detector' and 'RCNN' in self.__class__.__name__ and train_dataset.pos_num < len(
@@ -351,6 +368,7 @@ class BaseModel(metaclass=ModelMeta):
 
         self.train_data_loader = self.build_data_loader(
             train_dataset, batch_size=train_batch_size, mode='train')
+        self._check_arrange(self.train_data_loader.dataset.transforms, 'train')
 
         if eval_dataset is not None:
             self.test_transforms = copy.deepcopy(eval_dataset.transforms)
@@ -493,7 +511,7 @@ class BaseModel(metaclass=ModelMeta):
 
         assert criterion in {'l1_norm', 'fpgm'}, \
             "Pruning criterion {} is not supported. Please choose from {'l1_norm', 'fpgm'}."
-        self._check_transforms(dataset.transforms, 'eval')
+        self._check_transforms(dataset.transforms)
         # XXX: Hard-coding
         if self.model_type == 'detector':
             self.net.eval()
@@ -681,16 +699,19 @@ class BaseModel(metaclass=ModelMeta):
 
         return outputs
 
-    def _check_transforms(self, transforms, mode):
-        # NOTE: Check transforms and transforms.arrange and give user-friendly error messages.
-        if not isinstance(transforms, paddlers.transforms.Compose):
-            raise TypeError("`transforms` must be paddlers.transforms.Compose.")
+    def _check_transforms(self, transforms):
+        # NOTE: Check transforms
+        if not isinstance(transforms, Compose):
+            raise TypeError(
+                "`transforms` must be `paddlers.transforms.Compose`.")
+
+    def _check_arrange(self, transforms, mode):
         arrange_obj = transforms.arrange
-        if not isinstance(arrange_obj, paddlers.transforms.operators.Arrange):
+        if not isinstance(arrange_obj, Arrange):
             raise TypeError("`transforms.arrange` must be an Arrange object.")
         if arrange_obj.mode != mode:
             raise ValueError(
-                f"Incorrect arrange mode! Expected {mode} but got {arrange_obj.mode}."
+                f"Incorrect arrange mode! Expected {repr(mode)} but got {repr(arrange_obj.mode)}."
             )
 
     def run(self, net, inputs, mode):
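To make the new control flow concrete, here is a standalone sketch of what `_build_transforms` does to a user-supplied list, assuming `ArrangeSegmenter` as the task's `_arrange` and 'train' mode (the names come from the imports in the diff above):

import paddlers.transforms as T
from paddlers.transforms.operators import Compose, DecodeImg, ArrangeSegmenter

trans = [T.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]  # user-supplied list
trans = Compose(trans)                                # list -> Compose
if not isinstance(trans.transforms[0], DecodeImg):
    trans.transforms.insert(0, DecodeImg())           # always decode first
if trans.arrange is None:                             # late binding of Arrange
    trans.arrange = ArrangeSegmenter('train')         # i.e. self._arrange(mode)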

+ 15 - 9
paddlers/tasks/change_detector.py

@@ -29,6 +29,7 @@ import paddlers.rs_models.cd as cmcd
 import paddlers.utils.logging as logging
 from paddlers.models import seg_losses
 from paddlers.transforms import Resize, decode_image, construct_sample
+from paddlers.transforms.operators import ArrangeChangeDetector
 from paddlers.utils import get_single_card_bs
 from paddlers.utils.checkpoint import cd_pretrain_weights_dict
 from .base import BaseModel
@@ -43,6 +44,8 @@ __all__ = [
 
 
 class BaseChangeDetector(BaseModel):
+    _arrange = ArrangeChangeDetector
+
     def __init__(self,
                  model_name,
                  num_classes=2,
@@ -426,7 +429,7 @@ class BaseChangeDetector(BaseModel):
                     "category_F1-score": F1 score}.
         """
 
-        self._check_transforms(eval_dataset.transforms, 'eval')
+        self._check_transforms(eval_dataset.transforms)
 
         self.net.eval()
         nranks = paddle.distributed.get_world_size()
@@ -448,6 +451,7 @@
             )
         self.eval_data_loader = self.build_data_loader(
             eval_dataset, batch_size=batch_size, mode='eval')
+        self._check_arrange(self.eval_data_loader.dataset.transforms, 'eval')
 
         intersect_area_all = 0
         pred_area_all = 0
@@ -531,7 +535,7 @@
             img_file (list[tuple] | tuple[str|np.ndarray]): Tuple of image paths or
                 decoded image data for bi-temporal images, which also could constitute
                 a list, meaning all image pairs to be predicted as a mini-batch.
-            transforms (paddlers.transforms.Compose|None, optional): Transforms for
+            transforms (paddlers.transforms.Compose|list|None, optional): Transforms for
                 inputs. If None, the transforms for evaluation process will be used.
                 Defaults to None.
 
@@ -557,6 +561,8 @@
             images = [img_file]
         else:
             images = img_file
+        transforms = self._build_transforms(transforms, "test")
+        self._check_arrange(transforms, "test")
         data = self.preprocess(images, transforms, self.model_type)
         self.net.eval()
         outputs = self.run(self.net, data, 'test')
@@ -597,7 +603,7 @@
             overlap (list[int] | tuple[int] | int, optional):
                 Overlap between two blocks. If `overlap` is a list or tuple, it should
                 be in (W, H) format. Defaults to 36.
-            transforms (paddlers.transforms.Compose|None, optional): Transforms for
+            transforms (paddlers.transforms.Compose|list|None, optional): Transforms for
                 inputs. If None, the transforms for evaluation process will be used.
                 Defaults to None.
             invalid_value (int, optional): Value that marks invalid pixels in output
@@ -618,7 +624,7 @@
                        eager_load, not quiet)
 
     def preprocess(self, images, transforms, to_tensor=True):
-        self._check_transforms(transforms, 'test')
+        self._check_transforms(transforms)
         batch_im1, batch_im2 = list(), list()
         batch_trans_info = list()
         for im1, im2 in images:
@@ -713,12 +719,12 @@
             score_maps.append(score_map.squeeze())
         return label_maps, score_maps
 
-    def _check_transforms(self, transforms, mode):
-        super()._check_transforms(transforms, mode)
-        if not isinstance(transforms.arrange,
-                          paddlers.transforms.ArrangeChangeDetector):
+    def _check_arrange(self, transforms, mode):
+        super()._check_arrange(transforms, mode)
+        if not isinstance(transforms.arrange, ArrangeChangeDetector):
             raise TypeError(
-                "`transforms.arrange` must be an ArrangeChangeDetector object.")
+                "`transforms.arrange` must be an `ArrangeChangeDetector` object."
+            )
 
     def set_losses(self, losses, weights=None):
         if weights is None:
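A hedged end-to-end usage sketch for the updated prediction path (the model, operator, and image paths are taken from the tests touched in this PR; a trained/pretrained model would normally be restored first):

import paddlers as pdrs
import paddlers.transforms as T

model = pdrs.tasks.cd.BIT()
# A plain list is enough now: predict() runs _build_transforms() and
# _check_arrange() itself, appending ArrangeChangeDetector('test').
transforms = [T.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
prediction = model.predict(
    ("data/ssmt/optical_t1.bmp", "data/ssmt/optical_t2.bmp"),  # bi-temporal pair
    transforms=transforms)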

+ 14 - 7
paddlers/tasks/classifier.py

@@ -29,12 +29,16 @@ from paddlers.models import clas_losses
 from paddlers.models.ppcls.data.postprocess import build_postprocess
 from paddlers.utils.checkpoint import cls_pretrain_weights_dict
 from paddlers.transforms import Resize, decode_image, construct_sample
+from paddlers.transforms.operators import ArrangeClassifier
+
 from .base import BaseModel
 
 __all__ = ["ResNet50_vd", "MobileNetV3", "HRNet", "CondenseNetV2"]
 
 
 class BaseClassifier(BaseModel):
+    _arrange = ArrangeClassifier
+
     def __init__(self,
                  model_name,
                  in_channels=3,
@@ -380,7 +384,7 @@ class BaseClassifier(BaseModel):
                  "top5": acc of top5}.
         """
 
-        self._check_transforms(eval_dataset.transforms, 'eval')
+        self._check_transforms(eval_dataset.transforms)
 
         self.net.eval()
         nranks = paddle.distributed.get_world_size()
@@ -400,6 +404,8 @@
         if nranks < 2 or local_rank == 0:
             self.eval_data_loader = self.build_data_loader(
                 eval_dataset, batch_size=batch_size, mode='eval')
+            self._check_arrange(self.eval_data_loader.dataset.transforms,
+                                'eval')
             logging.info(
                 "Start to evaluate(total_samples={}, total_steps={})...".format(
                     eval_dataset.num_samples, eval_dataset.num_samples))
@@ -455,6 +461,8 @@
             images = [img_file]
         else:
             images = img_file
+        transforms = self._build_transforms(transforms, "test")
+        self._check_arrange(transforms, "test")
         data, _ = self.preprocess(images, transforms, self.model_type)
         self.net.eval()
 
@@ -480,7 +488,7 @@
         return prediction
 
     def preprocess(self, images, transforms, to_tensor=True):
-        self._check_transforms(transforms, 'test')
+        self._check_transforms(transforms)
         batch_im = list()
         for im in images:
             if isinstance(im, str):
@@ -496,12 +504,11 @@
 
         return batch_im, None
 
-    def _check_transforms(self, transforms, mode):
-        super()._check_transforms(transforms, mode)
-        if not isinstance(transforms.arrange,
-                          paddlers.transforms.ArrangeClassifier):
+    def _check_arrange(self, transforms, mode):
+        super()._check_arrange(transforms, mode)
+        if not isinstance(transforms.arrange, ArrangeClassifier):
             raise TypeError(
-                "`transforms.arrange` must be an ArrangeClassifier object.")
+                "`transforms.arrange` must be an `ArrangeClassifier` object.")
 
     def build_data_loader(self,
                           dataset,

+ 13 - 7
paddlers/tasks/object_detector.py

@@ -26,6 +26,7 @@ import paddlers.models.ppdet as ppdet
 from paddlers.models.ppdet.modeling.proposal_generator.target_layer import BBoxAssigner, MaskAssigner
 from paddlers.transforms import decode_image, construct_sample
 from paddlers.transforms.operators import _NormalizeBox, _PadBox, _BboxXYXY2XYWH, Resize, Pad
+from paddlers.transforms.operators import ArrangeDetector
 from paddlers.transforms.batch_operators import BatchCompose, BatchRandomResize, BatchRandomResizeByShort, \
     _BatchPad, _Gt2YoloTarget
 from paddlers.models.ppdet.optimizer import ModelEMA
@@ -42,6 +43,8 @@ __all__ = [
 
 
 class BaseDetector(BaseModel):
+    _arrange = ArrangeDetector
+
     def __init__(self, model_name, num_classes=80, **params):
         self.init_params.update(locals())
         if 'with_net' in self.init_params:
@@ -530,8 +533,8 @@
                 }
         eval_dataset.batch_transforms = self._compose_batch_transform(
             eval_dataset.transforms, mode='eval')
-        self._check_transforms(eval_dataset.transforms, 'eval')
 
+        self._check_transforms(eval_dataset.transforms)
         self.net.eval()
         nranks = paddle.distributed.get_world_size()
         local_rank = paddle.distributed.get_rank()
@@ -570,6 +573,8 @@
                     coco_gt=copy.deepcopy(eval_dataset.coco_gt),
                     classwise=False)
             scores = collections.OrderedDict()
+
+            self._check_arrange(eval_dataset.transforms, 'eval')
             logging.info(
                 "Start to evaluate(total_samples={}, total_steps={})...".format(
                     eval_dataset.num_samples, eval_dataset.num_samples))
@@ -623,6 +628,8 @@
         else:
             images = img_file
 
+        transforms = self._build_transforms(transforms, "test")
+        self._check_arrange(transforms, "test")
         batch_samples, _ = self.preprocess(images, transforms)
         self.net.eval()
         outputs = self.run(self.net, batch_samples, 'test')
@@ -633,7 +640,7 @@
         return prediction
 
     def preprocess(self, images, transforms, to_tensor=True):
-        self._check_transforms(transforms, 'test')
+        self._check_transforms(transforms)
         batch_samples = list()
         for im in images:
             if isinstance(im, str):
@@ -718,12 +725,11 @@
 
         return results
 
-    def _check_transforms(self, transforms, mode):
-        super()._check_transforms(transforms, mode)
-        if not isinstance(transforms.arrange,
-                          paddlers.transforms.ArrangeDetector):
+    def _check_arrange(self, transforms, mode):
+        super()._check_arrange(transforms, mode)
+        if not isinstance(transforms.arrange, ArrangeDetector):
             raise TypeError(
-                "`transforms.arrange` must be an ArrangeDetector object.")
+                "`transforms.arrange` must be an `ArrangeDetector` object.")
 
     def get_pruning_info(self):
         info = super().get_pruning_info()

+ 11 - 7
paddlers/tasks/restorer.py

@@ -29,6 +29,7 @@ import paddlers.utils.logging as logging
 from paddlers.models import res_losses
 from paddlers.models.ppgan.modules.init import init_weights
 from paddlers.transforms import Resize, decode_image, construct_sample
+from paddlers.transforms.operators import ArrangeRestorer
 from paddlers.transforms.functions import calc_hr_shape
 from paddlers.utils.checkpoint import res_pretrain_weights_dict
 from .base import BaseModel
@@ -39,6 +40,7 @@ __all__ = ["DRN", "LESRCNN", "ESRGAN"]
 
 
 class BaseRestorer(BaseModel):
+    _arrange = ArrangeRestorer
     MIN_MAX = (0., 1.)
     TEST_OUT_KEY = None
 
@@ -390,7 +392,7 @@
 
         """
 
-        self._check_transforms(eval_dataset.transforms, 'eval')
+        self._check_transforms(eval_dataset.transforms)
 
         self.net.eval()
         nranks = paddle.distributed.get_world_size()
@@ -411,6 +413,7 @@
         if nranks < 2 or local_rank == 0:
             self.eval_data_loader = self.build_data_loader(
                 eval_dataset, batch_size=batch_size, mode='eval')
+            self._check_arrange(eval_dataset.transforms, 'eval')
             # XXX: Hard-code crop_border and test_y_channel
             psnr = metrics.PSNR(crop_border=4, test_y_channel=True)
             ssim = metrics.SSIM(crop_border=4, test_y_channel=True)
@@ -466,6 +469,8 @@
             images = [img_file]
         else:
             images = img_file
+        transforms = self._build_transforms(transforms, "test")
+        self._check_arrange(transforms, "test")
         data = self.preprocess(images, transforms, self.model_type)
         self.net.eval()
         outputs = self.run(self.net, data, 'test')
@@ -477,7 +482,7 @@
         return prediction
 
     def preprocess(self, images, transforms, to_tensor=True):
-        self._check_transforms(transforms, 'test')
+        self._check_transforms(transforms)
         batch_im = list()
         batch_trans_info = list()
         for im in images:
@@ -559,12 +564,11 @@
             res_maps.append(res_map.squeeze())
         return res_maps
 
-    def _check_transforms(self, transforms, mode):
-        super()._check_transforms(transforms, mode)
-        if not isinstance(transforms.arrange,
-                          paddlers.transforms.ArrangeRestorer):
+    def _check_arrange(self, transforms, mode):
+        super()._check_arrange(transforms, mode)
+        if not isinstance(transforms.arrange, ArrangeRestorer):
             raise TypeError(
-                "`transforms.arrange` must be an ArrangeRestorer object.")
+                "`transforms.arrange` must be an `ArrangeRestorer` object.")
 
     def build_data_loader(self,
                           dataset,

+ 12 - 7
paddlers/tasks/segmenter.py

@@ -28,6 +28,7 @@ import paddlers.rs_models.seg as cmseg
 import paddlers.utils.logging as logging
 from paddlers.models import seg_losses
 from paddlers.transforms import Resize, decode_image, construct_sample
+from paddlers.transforms.operators import ArrangeSegmenter
 from paddlers.utils import get_single_card_bs, DisablePrint
 from paddlers.utils.checkpoint import seg_pretrain_weights_dict
 from .base import BaseModel
@@ -42,6 +43,8 @@ __all__ = [
 
 
 class BaseSegmenter(BaseModel):
+    _arrange = ArrangeSegmenter
+
     def __init__(self,
                  model_name,
                  num_classes=2,
@@ -406,7 +409,7 @@
 
         """
 
-        self._check_transforms(eval_dataset.transforms, 'eval')
+        self._check_transforms(eval_dataset.transforms)
         self.net.eval()
         nranks = paddle.distributed.get_world_size()
         local_rank = paddle.distributed.get_rank()
@@ -426,6 +429,7 @@
                 "is forcibly set to {}.".format(batch_size))
         self.eval_data_loader = self.build_data_loader(
             eval_dataset, batch_size=batch_size, mode='eval')
+        self._check_arrange(eval_dataset.transforms, 'eval')
 
         intersect_area_all = 0
         pred_area_all = 0
@@ -524,6 +528,8 @@
             images = [img_file]
         else:
             images = img_file
+        transforms = self._build_transforms(transforms, "test")
+        self._check_arrange(transforms, "test")
         data = self.preprocess(images, transforms, self.model_type)
         self.net.eval()
         outputs = self.run(self.net, data, 'test')
@@ -585,7 +591,7 @@
                        eager_load, not quiet)
 
     def preprocess(self, images, transforms, to_tensor=True):
-        self._check_transforms(transforms, 'test')
+        self._check_transforms(transforms)
         batch_im = list()
         batch_trans_info = list()
         for im in images:
@@ -675,12 +681,11 @@
             score_maps.append(score_map.squeeze())
         return label_maps, score_maps
 
-    def _check_transforms(self, transforms, mode):
-        super()._check_transforms(transforms, mode)
-        if not isinstance(transforms.arrange,
-                          paddlers.transforms.ArrangeSegmenter):
+    def _check_arrange(self, transforms, mode):
+        super()._check_arrange(transforms, mode)
+        if not isinstance(transforms.arrange, ArrangeSegmenter):
             raise TypeError(
-                "`transforms.arrange` must be an ArrangeSegmenter object.")
+                "`transforms.arrange` must be an `ArrangeSegmenter` object.")
 
     def set_losses(self, losses, weights=None):
         if weights is None:

+ 1 - 1
paddlers/tasks/utils/slider_predict.py

@@ -330,7 +330,7 @@ def slider_predict(predict_func,
         overlap (list[int] | tuple[int] | int):
             Overlap between two blocks. If `overlap` is list or tuple, it should
             be in (W, H) format.
-        transforms (paddlers.transforms.Compose|None): Transforms for inputs. If
+        transforms (paddlers.transforms.Compose|list|None): Transforms for inputs. If
             None, the transforms for evaluation process will be used.
         invalid_value (int): Value that marks invalid pixels in output image.
             Defaults to 255.

+ 24 - 21
paddlers/transforms/indices.py

@@ -27,21 +27,24 @@ __all__ = [
 ]
 
 EPS = 1e-32
-BAND_NAMES = ["b", "g", "r", "re1", "re2", "re3", "n", "s1", "s2", "t1", "t2"]
-
-# | Band name | Description | Wavelength (μm) | Satellite |
-# |-----------|-------------|-----------------|-----------|
-# |     b     | Blue        |   0.450-0.515   | Landsat8  |
-# |     g     | Green       |   0.525-0.600   | Landsat8  |
-# |     r     | Red         |   0.630-0.680   | Landsat8  |
-# |    re1    | Red Edge 1  |   0.698-0.713   | Sentinel2 |
-# |    re2    | Red Edge 2  |   0.733-0.748   | Sentinel2 |
-# |    re3    | Red Edge 3  |   0.773-0.793   | Sentinel2 |
-# |     n     | NIR         |   0.845-0.885   | Landsat8  |
-# |    s1     | SWIR 1      |   1.560-1.660   | Landsat8  |
-# |    s2     | SWIR 2      |   2.100-2.300   | Landsat8  |
-# |    t1     | Thermal 1   |   10.60-11.19   | Landsat8  |
-# |    t2     | Thermal 2   |   11.50-12.51   | Landsat8  |
+BAND_NAMES = [
+    "b", "g", "r", "re1", "re2", "re3", "n", "s1", "s2", "t", "t1", "t2"
+]
+
+# | Band name |   Description    | Wavelength (μm) | Satellite |
+# |-----------|------------------|-----------------|-----------|
+# |     b     | Blue             |   0.450-0.515   | Landsat8  |
+# |     g     | Green            |   0.525-0.600   | Landsat8  |
+# |     r     | Red              |   0.630-0.680   | Landsat8  |
+# |    re1    | Red Edge 1       |   0.698-0.713   | Sentinel2 |
+# |    re2    | Red Edge 2       |   0.733-0.748   | Sentinel2 |
+# |    re3    | Red Edge 3       |   0.773-0.793   | Sentinel2 |
+# |     n     | NIR              |   0.845-0.885   | Landsat8  |
+# |    s1     | SWIR 1           |   1.560-1.660   | Landsat8  |
+# |    s2     | SWIR 2           |   2.100-2.300   | Landsat8  |
+# |     t     | Thermal Infrared |   10.40-12.50   | Landsat7  |
+# |    t1     | Thermal 1        |   10.60-11.19   | Landsat8  |
+# |    t2     | Thermal 2        |   11.50-12.51   | Landsat8  |
 
 
 class RSIndex(metaclass=abc.ABCMeta):
@@ -189,8 +192,8 @@ class CSI(RSIndex):
 
 
 class CSIT(RSIndex):
-    def _compute(self, n, s2, t1):
-        return n / ((s2 * t1) / 10000.0 + EPS)
+    def _compute(self, n, s2, t):
+        return n / ((s2 * t) / 10000.0 + EPS)
 
 
 class DBI(RSIndex):
@@ -213,9 +216,9 @@ class DVI(RSIndex):
 
 
 class EBBI(RSIndex):
-    def _compute(self, n, s1, t1):
+    def _compute(self, n, s1, t):
         num = s1 - n
-        denom = (10.0 * ((s1 + t1)**0.5))
+        denom = (10.0 * ((s1 + t)**0.5))
         return num / (denom + EPS)
 
 
@@ -334,8 +337,8 @@ class MSI(RSIndex):
 
 
 class NBLI(RSIndex):
-    def _compute(self, r, t1):
-        return compute_normalized_difference_index(r, t1)
+    def _compute(self, r, t):
+        return compute_normalized_difference_index(r, t)
 
 
 class NDSI(RSIndex):
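For reference, a small numeric sketch of the updated EBBI computation using the new Landsat7 thermal band `t` (the formula follows `_compute` above; the band values are arbitrary):

import numpy as np

EPS = 1e-32
n, s1, t = np.float32(0.40), np.float32(0.25), np.float32(0.60)
ebbi = (s1 - n) / (10.0 * ((s1 + t) ** 0.5) + EPS)
print(float(ebbi))  # roughly -0.0163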

+ 2 - 17
paddlers/transforms/operators.py

@@ -61,11 +61,6 @@ __all__ = [
     "ReloadMask",
     "AppendIndex",
     "MatchRadiance",
-    "ArrangeRestorer",
-    "ArrangeSegmenter",
-    "ArrangeChangeDetector",
-    "ArrangeClassifier",
-    "ArrangeDetector",
 ]
 
 interp_dict = {
@@ -115,7 +110,8 @@ class Compose(object):
                 "Length of transforms must not be less than 1, but received is {}."
                 .format(len(transforms)))
         transforms = copy.deepcopy(transforms)
-        self.arrange = self._pick_arrange(transforms)
+        # We will have to do a late binding of `self.arrange`
+        self.arrange = None
        self.transforms = transforms
 
     def __call__(self, sample):
@@ -143,17 +139,6 @@
             sample = self.arrange(sample)
         return sample
 
-    def _pick_arrange(self, transforms):
-        arrange = None
-        for idx, op in enumerate(transforms):
-            if isinstance(op, Arrange):
-                if idx != len(transforms) - 1:
-                    raise ValueError(
-                        "Arrange operator must be placed at the end of the list."
-                    )
-                arrange = transforms.pop(idx)
-        return arrange
-
 
 class Transform(object):
     """

+ 2 - 2
requirements.txt

@@ -24,5 +24,5 @@ scikit-learn
 scikit-image >= 0.14.0
 scipy
 shapely
-spyndex
-visualdl >= 2.1.1
+spyndex >= 0.4.0
+visualdl >= 2.1.1

+ 5 - 20
tests/deploy/test_predictor.py

@@ -126,10 +126,7 @@ class TestCDPredictor(TestPredictor):
         t2_path = "data/ssmt/optical_t2.bmp"
         single_input = (t1_path, t2_path)
         num_inputs = 2
-        transforms = pdrs.transforms.Compose([
-            pdrs.transforms.DecodeImg(), pdrs.transforms.Normalize(),
-            pdrs.transforms.ArrangeChangeDetector('test')
-        ])
+        transforms = [pdrs.transforms.Normalize()]
 
         # Expected failure
         with self.assertRaises(ValueError):
@@ -194,10 +191,7 @@ class TestClasPredictor(TestPredictor):
     def check_predictor(self, predictor, trainer):
         single_input = "data/ssst/optical.bmp"
         num_inputs = 2
-        transforms = pdrs.transforms.Compose([
-            pdrs.transforms.DecodeImg(), pdrs.transforms.Normalize(),
-            pdrs.transforms.ArrangeClassifier('test')
-        ])
+        transforms = [pdrs.transforms.Normalize()]
         labels = list(range(2))
         trainer.labels = labels
         predictor._model.labels = labels
@@ -263,10 +257,7 @@ class TestDetPredictor(TestPredictor):
         # given that the network is (partially?) randomly initialized.
         single_input = "data/ssst/optical.bmp"
         num_inputs = 2
-        transforms = pdrs.transforms.Compose([
-            pdrs.transforms.DecodeImg(), pdrs.transforms.Normalize(),
-            pdrs.transforms.ArrangeDetector('test')
-        ])
+        transforms = [pdrs.transforms.Normalize()]
         labels = list(range(80))
         trainer.labels = labels
         predictor._model.labels = labels
@@ -328,10 +319,7 @@ class TestResPredictor(TestPredictor):
         # because the output is of uint8 type.
         single_input = "data/ssst/optical.bmp"
         num_inputs = 2
-        transforms = pdrs.transforms.Compose([
-            pdrs.transforms.DecodeImg(), pdrs.transforms.Normalize(),
-            pdrs.transforms.ArrangeRestorer('test')
-        ])
+        transforms = [pdrs.transforms.Normalize()]
 
         # Single input (file path)
         input_ = single_input
@@ -383,10 +371,7 @@ class TestSegPredictor(TestPredictor):
     def check_predictor(self, predictor, trainer):
         single_input = "data/ssst/optical.bmp"
         num_inputs = 2
-        transforms = pdrs.transforms.Compose([
-            pdrs.transforms.DecodeImg(), pdrs.transforms.Normalize(),
-            pdrs.transforms.ArrangeSegmenter('test')
-        ])
+        transforms = [pdrs.transforms.Normalize()]
 
         # Single input (file path)
         input_ = single_input

+ 2 - 8
tests/tasks/test_slider_predict.py

@@ -232,10 +232,7 @@ class _TestSliderPredictNamespace:
 class TestSegSliderPredict(_TestSliderPredictNamespace.TestSliderPredict):
     def setUp(self):
         self.model = pdrs.tasks.seg.UNet(in_channels=10)
-        self.transforms = T.Compose([
-            T.DecodeImg(), T.Normalize([0.5] * 10, [0.5] * 10),
-            T.ArrangeSegmenter('test')
-        ])
+        self.transforms = [T.Normalize([0.5] * 10, [0.5] * 10)]
         self.image_path = "data/ssst/multispectral.tif"
         self.ref_path = self.image_path
         self.basename = osp.basename(self.ref_path)
@@ -244,10 +241,7 @@ class TestSegSliderPredict(_TestSliderPredictNamespace.TestSliderPredict):
 class TestCDSliderPredict(_TestSliderPredictNamespace.TestSliderPredict):
     def setUp(self):
         self.model = pdrs.tasks.cd.BIT(in_channels=10)
-        self.transforms = T.Compose([
-            T.DecodeImg(), T.Normalize([0.5] * 10, [0.5] * 10),
-            T.ArrangeChangeDetector('test')
-        ])
+        self.transforms = [T.Normalize([0.5] * 10, [0.5] * 10)]
         self.image_path = ("data/ssmt/multispectral_t1.tif",
                            "data/ssmt/multispectral_t2.tif")
         self.ref_path = self.image_path[0]

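The sliding-window prediction tests follow the same pattern. A sketch of the intended call, assuming the slider_predict interface keeps img_file/save_dir/block_size/transforms arguments (the output directory and block size here are illustrative only):

import paddlers as pdrs
from paddlers import transforms as T

model = pdrs.tasks.seg.UNet(in_channels=10)
# A bare list of preprocessing ops replaces the old explicit Compose.
transforms = [T.Normalize([0.5] * 10, [0.5] * 10)]
model.slider_predict(
    img_file="data/ssst/multispectral.tif",
    save_dir="./output",   # illustrative output directory
    block_size=256,        # illustrative block size
    transforms=transforms)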
+ 1 - 0
tests/transforms/test_indices.py

@@ -30,6 +30,7 @@ NAME_MAPPING = {
     'n': 'N',
     's1': 'S1',
     's2': 'S2',
+    't': 'T',
     't1': 'T1',
     't2': 'T2'
 }

+ 6 - 11
tutorials/train/change_detection/bit.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

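All tutorials below receive the same treatment: the transform pipeline becomes a plain Python list, and DecodeImg plus the task-specific Arrange* op disappear from user code. A sketch of how such a list is presumably consumed, assuming the dataset wraps it into a Compose internally (the dataset paths are placeholders):

import paddlers as pdrs
from paddlers import transforms as T

train_transforms = [
    T.RandomCrop(crop_size=256),
    T.RandomHorizontalFlip(prob=0.5),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
]

# The dataset takes the bare list; reading the image pairs and arranging the
# samples for change detection are assumed to happen inside the dataset.
train_dataset = pdrs.datasets.CDDataset(
    data_dir="./levircd",             # placeholder path
    file_list="./levircd/train.txt",  # placeholder path
    label_list=None,
    transforms=train_transforms,
    shuffle=True)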
+ 6 - 11
tutorials/train/change_detection/cdnet.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/changeformer.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/transforms.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/dsamnet.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/dsifn.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/fc_ef.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/fc_siam_conc.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/fc_siam_diff.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/fccdn.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/transforms.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/p2v.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/snunet.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/change_detection/stanet.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping
     T.RandomCrop(
         # The cropped region will be scaled to 256x256
@@ -37,18 +35,15 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeChangeDetector('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeChangeDetector('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.CDDataset(

+ 6 - 11
tutorials/train/classification/condensenetv2.py

@@ -24,9 +24,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 256x256
     T.Resize(target_size=256),
     # Randomly flip horizontally with a probability of 50%
@@ -35,18 +33,15 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('eval')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ClasDataset(

+ 6 - 11
tutorials/train/classification/hrnet.py

@@ -24,9 +24,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 256x256
     T.Resize(target_size=256),
     # Randomly flip horizontally with a probability of 50%
@@ -35,18 +33,15 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('eval')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ClasDataset(

+ 6 - 11
tutorials/train/classification/mobilenetv3.py

@@ -24,9 +24,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 256x256
     T.Resize(target_size=256),
     # Randomly flip horizontally with a probability of 50%
@@ -35,18 +33,15 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('eval')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ClasDataset(

+ 6 - 11
tutorials/train/classification/resnet50_vd.py

@@ -24,9 +24,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 256x256
     T.Resize(target_size=256),
     # Randomly flip horizontally with a probability of 50%
@@ -35,18 +33,15 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeClassifier('eval')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ClasDataset(

+ 6 - 11
tutorials/train/image_restoration/drn.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Crop 96x96 patches from the input image
     T.RandomCrop(crop_size=96),
     # Randomly flip horizontally with a probability of 50%
@@ -33,19 +31,16 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [0,1]
     T.Normalize(
-        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
-    T.ArrangeRestorer('train')
-])
+        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to 256x256
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
-    T.ArrangeRestorer('eval')
-])
+        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ResDataset(

+ 6 - 11
tutorials/train/image_restoration/esrgan.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Crop 32x32 patches from the input image
     T.RandomCrop(crop_size=32),
     # Randomly flip horizontally with a probability of 50%
@@ -33,19 +31,16 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [0,1]
     T.Normalize(
-        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
-    T.ArrangeRestorer('train')
-])
+        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to 256x256
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
-    T.ArrangeRestorer('eval')
-])
+        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ResDataset(

+ 6 - 11
tutorials/train/image_restoration/lesrcnn.py

@@ -22,9 +22,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Crop 32x32 patches from the input image
     T.RandomCrop(crop_size=32),
     # Randomly flip horizontally with a probability of 50%
@@ -33,19 +31,16 @@ train_transforms = T.Compose([
     T.RandomVerticalFlip(prob=0.5),
     # Normalize the data to [0,1]
     T.Normalize(
-        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
-    T.ArrangeRestorer('train')
-])
+        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to 256x256
     T.Resize(target_size=256),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
-    T.ArrangeRestorer('eval')
-])
+        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.ResDataset(

+ 6 - 11
tutorials/train/object_detection/faster_rcnn.py

@@ -26,9 +26,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping; the crop size varies within a certain range
    T.RandomCrop(),
     # Random horizontal flipping
@@ -38,20 +36,17 @@ train_transforms = T.Compose([
         target_sizes=[512, 544, 576, 608], interp='RANDOM'),
     # Normalize the image
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('train')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to a fixed size using bicubic interpolation
     T.Resize(
         target_size=608, interp='CUBIC'),
     # The normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('eval')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.VOCDetDataset(

+ 6 - 11
tutorials/train/object_detection/ppyolo.py

@@ -26,9 +26,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping; the crop size varies within a certain range
     T.RandomCrop(),
     # Random horizontal flipping
@@ -38,20 +36,17 @@ train_transforms = T.Compose([
         target_sizes=[512, 544, 576, 608], interp='RANDOM'),
     # Normalize the image
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('train')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to a fixed size using bicubic interpolation
     T.Resize(
         target_size=608, interp='CUBIC'),
     # The normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('eval')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.VOCDetDataset(

+ 6 - 11
tutorials/train/object_detection/ppyolo_tiny.py

@@ -26,9 +26,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping; the crop size varies within a certain range
     T.RandomCrop(),
     # Random horizontal flipping
@@ -38,20 +36,17 @@ train_transforms = T.Compose([
         target_sizes=[512, 544, 576, 608], interp='RANDOM'),
     # Normalize the image
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('train')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to a fixed size using bicubic interpolation
     T.Resize(
         target_size=608, interp='CUBIC'),
     # The normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('eval')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.VOCDetDataset(

+ 6 - 11
tutorials/train/object_detection/ppyolov2.py

@@ -26,9 +26,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping; the crop size varies within a certain range
     T.RandomCrop(),
     # Random horizontal flipping
@@ -38,20 +36,17 @@ train_transforms = T.Compose([
         target_sizes=[512, 544, 576, 608], interp='RANDOM'),
     # Normalize the image
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('train')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to a fixed size using bicubic interpolation
     T.Resize(
         target_size=608, interp='CUBIC'),
     # The normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('eval')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.VOCDetDataset(

+ 6 - 11
tutorials/train/object_detection/yolov3.py

@@ -26,9 +26,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Random cropping; the crop size varies within a certain range
     T.RandomCrop(),
     # Random horizontal flipping
@@ -38,20 +36,17 @@ train_transforms = T.Compose([
         target_sizes=[512, 544, 576, 608], interp='RANDOM'),
     # Normalize the image
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('train')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Resize the input image to a fixed size using bicubic interpolation
     T.Resize(
         target_size=608, interp='CUBIC'),
     # The normalization used for validation must be the same as for training
     T.Normalize(
-        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    T.ArrangeDetector('eval')
-])
+        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.VOCDetDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/bisenetv2.py

@@ -27,28 +27,23 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 512x512
     T.Resize(target_size=512),
     # Randomly flip horizontally with a probability of 50%
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS)
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/deeplabv3p.py

@@ -27,28 +27,23 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 512x512
     T.Resize(target_size=512),
     # Randomly flip horizontally with a probability of 50%
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS)
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/factseg.py

@@ -24,9 +24,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Select the first three bands
     T.SelectBand([1, 2, 3]),
     # Resize the image to 512x512
@@ -35,21 +33,18 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Validation should select the same bands as training
     T.SelectBand([1, 2, 3]),
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/farseg.py

@@ -24,9 +24,7 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Select the first three bands
     T.SelectBand([1, 2, 3]),
     # Resize the image to 512x512
@@ -35,21 +33,18 @@ train_transforms = T.Compose([
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     # Validation should select the same bands as training
     T.SelectBand([1, 2, 3]),
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/fast_scnn.py

@@ -27,28 +27,23 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 512x512
     T.Resize(target_size=512),
     # Randomly flip horizontally with a probability of 50%
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS)
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/hrnet.py

@@ -27,28 +27,23 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 512x512
     T.Resize(target_size=512),
     # Randomly flip horizontally with a probability of 50%
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS)
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(

+ 6 - 11
tutorials/train/semantic_segmentation/unet.py

@@ -27,28 +27,23 @@ pdrs.utils.download_and_decompress(
 # Define the data transforms used for training and validation (data augmentation, preprocessing, etc.)
 # Use Compose to combine multiple transforms; the transforms in Compose are executed sequentially
 # API reference: https://github.com/PaddlePaddle/PaddleRS/blob/develop/docs/apis/data.md
-train_transforms = T.Compose([
-    # Read the image
-    T.DecodeImg(),
+train_transforms = [
     # Resize the image to 512x512
     T.Resize(target_size=512),
     # Randomly flip horizontally with a probability of 50%
     T.RandomHorizontalFlip(prob=0.5),
     # Normalize the data to [-1,1]
     T.Normalize(
-        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ArrangeSegmenter('train')
-])
+        mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS)
+]

-eval_transforms = T.Compose([
-    T.DecodeImg(),
+eval_transforms = [
     T.Resize(target_size=512),
     # The data normalization used for validation must be the same as for training
     T.Normalize(
         mean=[0.5] * NUM_BANDS, std=[0.5] * NUM_BANDS),
-    T.ReloadMask(),
-    T.ArrangeSegmenter('eval')
-])
+    T.ReloadMask()
+]

 # Build the training and validation datasets
 train_dataset = pdrs.datasets.SegDataset(