import os

import paddlers as pdrs
from paddlers import transforms as T

# Dataset download and extraction directories.
DOWNLOAD_DIR = './data/sarship/'
DATA_DIR = './data/sarship/sar_ship_1/'
# File lists and label list of the dataset.
TRAIN_FILE_LIST_PATH = './data/sarship/sar_ship_1/train.txt'
EVAL_FILE_LIST_PATH = './data/sarship/sar_ship_1/valid.txt'
LABEL_LIST_PATH = './data/sarship/sar_ship_1/labels.txt'
# Directory where checkpoints and training logs are written.
EXP_DIR = './output/yolov3/'

# Download and decompress the SAR ship detection dataset if it is not
# already present locally.
sarship_dataset = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
if not os.path.exists(DATA_DIR):
    pdrs.utils.download_and_decompress(sarship_dataset, path=DOWNLOAD_DIR)

# Data transforms applied during training.
train_transforms = T.Compose([
    # Random color distortion of the input image.
    T.RandomDistort(),
    # Random expansion of the image canvas.
    T.RandomExpand(),
    # Random cropping.
    T.RandomCrop(),
    # Random horizontal flipping.
    T.RandomHorizontalFlip(),
    # Randomly resize each batch to one of the target sizes, with a
    # randomly chosen interpolation method (multi-scale training).
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    # Normalize with ImageNet mean and standard deviation.
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Data transforms applied during evaluation.
eval_transforms = T.Compose([
    # Resize to a fixed size using cubic interpolation.
    T.Resize(
        target_size=608, interp='CUBIC'),
    # Normalize with ImageNet mean and standard deviation.
    T.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

train_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdrs.datasets.VOCDetection(
    data_dir=DATA_DIR,
    file_list=EVAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    shuffle=False)

# Build a YOLOv3 detector; the number of classes comes from the label list.
model = pdrs.tasks.YOLOv3(
    num_classes=len(train_dataset.labels), backbone='DarkNet53')

# Train the model.
model.train(
    num_epochs=10,
    train_dataset=train_dataset,
    train_batch_size=4,
    eval_dataset=eval_dataset,
    # Save a checkpoint every 5 epochs.
    save_interval_epochs=5,
    # Log once every 4 iterations.
    log_interval_steps=4,
    save_dir=EXP_DIR,
    # Initial learning rate.
    learning_rate=0.0001,
    # Learning-rate warm-up steps and starting value (warm-up disabled here).
    warmup_steps=0,
    warmup_start_lr=0.0,
    # Enable VisualDL training logs.
    use_vdl=True)
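
# ---------------------------------------------------------------------------
# Optional sanity check after training: run the trained detector on one
# validation image. This is a sketch that goes beyond the original script; it
# assumes the PaddleX-style inference API exposed by PaddleRS
# (pdrs.tasks.load_model() and model.predict()), and it assumes that
# model.train() wrote the best checkpoint to EXP_DIR/best_model.
best_model = pdrs.tasks.load_model(os.path.join(EXP_DIR, 'best_model'))

# Take the first image listed in the validation file list
# (each line is "image_path annotation_path").
with open(EVAL_FILE_LIST_PATH) as f:
    img_path = os.path.join(DATA_DIR, f.readline().split()[0])

# predict() returns one dict per detected object, typically with
# 'category', 'bbox', and 'score' fields.
pred = best_model.predict(img_path, transforms=eval_transforms)
print(pred[:5])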