import paddlers as pdrs
from paddlers import transforms as T

# Dataset directory
DATA_DIR = './data/rssr/'
# Path to the training set file list
TRAIN_FILE_LIST_PATH = './data/rssr/train.txt'
# Path to the validation set file list
EVAL_FILE_LIST_PATH = './data/rssr/val.txt'
# Output directory for model weights and training logs
EXP_DIR = './output/drn/'

# Download and decompress the remote sensing super-resolution dataset
pdrs.utils.download_and_decompress(
    'https://paddlers.bj.bcebos.com/datasets/rssr.zip', path='./data/')
# Training transforms: Compose applies the listed transforms in order
train_transforms = T.Compose([
    # Decode the image file
    T.DecodeImg(),
    # Randomly crop a 96x96 patch from the input image
    T.RandomCrop(crop_size=96),
    # Random horizontal flip with 50% probability
    T.RandomHorizontalFlip(prob=0.5),
    # Random vertical flip with 50% probability
    T.RandomVerticalFlip(prob=0.5),
    # Scale pixel values to [0, 1]
    T.Normalize(
        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
    T.ArrangeRestorer('train')
])
# Validation transforms: normalization must match the training setup
eval_transforms = T.Compose([
    T.DecodeImg(),
    # Resize the input image to 256x256
    T.Resize(target_size=256),
    T.Normalize(
        mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
    T.ArrangeRestorer('eval')
])
# Build the training and validation datasets (4x super-resolution factor)
train_dataset = pdrs.datasets.ResDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    transforms=train_transforms,
    num_workers=0,
    shuffle=True,
    sr_factor=4)

eval_dataset = pdrs.datasets.ResDataset(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False,
    sr_factor=4)
# Build the DRN super-resolution model with default parameters
model = pdrs.tasks.res.DRN()

# Train the model
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    # Save a checkpoint every 5 epochs
    save_interval_epochs=5,
    # Log every 10 iterations
    log_interval_steps=10,
    save_dir=EXP_DIR,
    # Initial learning rate
    learning_rate=0.001,
    # Do not use early stopping
    early_stop=False,
    # Enable VisualDL logging
    use_vdl=True,
    # Resume from a checkpoint (None means train from scratch)
    resume_checkpoint=None)
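
# --- Optional inference check (a minimal sketch, not part of the original
# tutorial). It assumes training has finished and that PaddleRS saved the
# best-scoring checkpoint under EXP_DIR in a 'best_model' subdirectory;
# the image path below is a hypothetical placeholder and must be replaced
# with a real low-resolution input. Uncomment to try it:
#
# model = pdrs.tasks.load_model(EXP_DIR + 'best_model')
# result = model.predict('path/to/a_low_res_image.png')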