# Object-detection tutorial: train a PP-YOLO model on the SAR ship dataset
# using PaddleRS. The script downloads the data, builds the train/eval
# pipelines, constructs VOC-format datasets, and launches training.

import paddlers as pdrs
from paddlers import transforms as T

# Dataset root, file lists, label list, and experiment output directory.
DATA_DIR = './data/sarship/'
TRAIN_FILE_LIST_PATH = './data/sarship/train.txt'
EVAL_FILE_LIST_PATH = './data/sarship/eval.txt'
LABEL_LIST_PATH = './data/sarship/labels.txt'
EXP_DIR = './output/ppyolo/'

# Fetch and unpack the SAR ship dataset under ./data/.
pdrs.utils.download_and_decompress(
    'https://paddlers.bj.bcebos.com/datasets/sarship.zip', path='./data/')

# Per-channel statistics shared by the train and eval normalization steps
# (presumably ImageNet statistics — standard for COCO-pretrained backbones).
_NORM_MEAN = [0.485, 0.456, 0.406]
_NORM_STD = [0.229, 0.224, 0.225]

# Per-sample transforms applied during training: random crop + horizontal
# flip for augmentation, then normalization.
train_transforms = [
    T.RandomCrop(),
    T.RandomHorizontalFlip(),
    T.Normalize(mean=_NORM_MEAN, std=_NORM_STD),
]

# Batch-level transforms: multi-scale training by randomly resizing each
# batch to one of the listed target sizes.
train_batch_transforms = [
    T.BatchRandomResize(target_sizes=[512, 544, 576, 608], interp='RANDOM'),
]

# Deterministic eval pipeline: fixed-size cubic resize, then normalization.
eval_transforms = [
    T.Resize(target_size=608, interp='CUBIC'),
    T.Normalize(mean=_NORM_MEAN, std=_NORM_STD),
]

# VOC-format detection datasets; only the training set is shuffled.
train_dataset = pdrs.datasets.VOCDetDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    batch_transforms=train_batch_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetDataset(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Number of classes comes from the label list parsed by the dataset.
model = pdrs.tasks.det.PPYOLO(num_classes=len(train_dataset.labels))

# Train from COCO-pretrained weights; checkpoints every 5 epochs, logs every
# 4 steps, VisualDL enabled, no LR warmup.
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    save_interval_epochs=5,
    log_interval_steps=4,
    save_dir=EXP_DIR,
    pretrain_weights='COCO',
    learning_rate=0.0001,
    warmup_steps=0,
    warmup_start_lr=0.0,
    use_vdl=True)