# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
import os.path as osp
from collections import OrderedDict

import numpy as np
import cv2
import paddle
import paddle.nn.functional as F
from paddle.static import InputSpec

import paddlers
import paddlers.models.ppseg as paddleseg
import paddlers.custom_models.seg as cmseg
import paddlers.utils.logging as logging
from paddlers.transforms import arrange_transforms, ImgDecoder, Resize
from paddlers.utils import get_single_card_bs, DisablePrint
from paddlers.utils.checkpoint import seg_pretrain_weights_dict
from .base import BaseModel
from .utils import seg_metrics as metrics

__all__ = ["UNet", "DeepLabV3P", "FastSCNN", "HRNet", "BiSeNetV2", "FarSeg"]


class BaseSegmenter(BaseModel):
    def __init__(self,
                 model_name,
                 num_classes=2,
                 use_mixed_loss=False,
                 **params):
        self.init_params = locals()
        if 'with_net' in self.init_params:
            del self.init_params['with_net']
        super(BaseSegmenter, self).__init__('segmenter')
        if not hasattr(paddleseg.models, model_name) and \
                not hasattr(cmseg, model_name):
            raise Exception("ERROR: There's no model named {}.".format(
                model_name))
        self.model_name = model_name
        self.num_classes = num_classes
        self.use_mixed_loss = use_mixed_loss
        self.losses = None
        self.labels = None
        if params.get('with_net', True):
            params.pop('with_net', None)
            self.net = self.build_net(**params)
        self.find_unused_parameters = True

    def build_net(self, **params):
        # TODO: when using paddle.utils.unique_name.guard,
        # DeepLabv3p and HRNet will raise an error.
        net = dict(paddleseg.models.__dict__,
                   **cmseg.__dict__)[self.model_name](
                       num_classes=self.num_classes, **params)
        return net

    def _fix_transforms_shape(self, image_shape):
        if hasattr(self, 'test_transforms'):
            if self.test_transforms is not None:
                has_resize_op = False
                resize_op_idx = -1
                normalize_op_idx = len(self.test_transforms.transforms)
                for idx, op in enumerate(self.test_transforms.transforms):
                    name = op.__class__.__name__
                    if name == 'Normalize':
                        normalize_op_idx = idx
                    if 'Resize' in name:
                        has_resize_op = True
                        resize_op_idx = idx
                if not has_resize_op:
                    self.test_transforms.transforms.insert(
                        normalize_op_idx, Resize(target_size=image_shape))
                else:
                    self.test_transforms.transforms[resize_op_idx] = Resize(
                        target_size=image_shape)

    def _get_test_inputs(self, image_shape):
        if image_shape is not None:
            if len(image_shape) == 2:
                image_shape = [1, 3] + image_shape
            self._fix_transforms_shape(image_shape[-2:])
        else:
            image_shape = [None, 3, -1, -1]
        self.fixed_input_shape = image_shape
        input_spec = [
            InputSpec(
                shape=image_shape, name='image', dtype='float32')
        ]
        return input_spec

    def run(self, net, inputs, mode):
        net_out = net(inputs[0])
        logit = net_out[0]
        outputs = OrderedDict()
        if mode == 'test':
            origin_shape = inputs[1]
            if self.status == 'Infer':
                label_map_list, score_map_list = self._postprocess(
                    net_out, origin_shape, transforms=inputs[2])
            else:
                logit_list = self._postprocess(
                    logit, origin_shape, transforms=inputs[2])
                label_map_list = []
                score_map_list = []
                for logit in logit_list:
                    logit = paddle.transpose(logit, perm=[0, 2, 3, 1])  # NHWC
                    label_map_list.append(
                        paddle.argmax(
                            logit, axis=-1, keepdim=False, dtype='int32')
                        .squeeze().numpy())
                    score_map_list.append(
                        F.softmax(
                            logit,
                            axis=-1).squeeze().numpy().astype('float32'))
            outputs['label_map'] = label_map_list
            outputs['score_map'] = score_map_list
        if mode == 'eval':
            if self.status == 'Infer':
                pred = paddle.unsqueeze(net_out[0], axis=1)  # NCHW
            else:
                pred = paddle.argmax(
                    logit, axis=1, keepdim=True, dtype='int32')
            label = inputs[1]
            origin_shape = [label.shape[-2:]]
            pred = self._postprocess(
                pred, origin_shape, transforms=inputs[2])[0]  # NCHW
            intersect_area, pred_area, label_area = \
                paddleseg.utils.metrics.calculate_area(
                    pred, label, self.num_classes)
            outputs['intersect_area'] = intersect_area
            outputs['pred_area'] = pred_area
            outputs['label_area'] = label_area
            outputs['conf_mat'] = metrics.confusion_matrix(pred, label,
                                                           self.num_classes)
        if mode == 'train':
            loss_list = metrics.loss_computation(
                logits_list=net_out, labels=inputs[1], losses=self.losses)
            loss = sum(loss_list)
            outputs['loss'] = loss
        return outputs

    def default_loss(self):
        if isinstance(self.use_mixed_loss, bool):
            if self.use_mixed_loss:
                losses = [
                    paddleseg.models.CrossEntropyLoss(),
                    paddleseg.models.LovaszSoftmaxLoss()
                ]
                coef = [.8, .2]
                loss_type = [
                    paddleseg.models.MixedLoss(
                        losses=losses, coef=coef),
                ]
            else:
                loss_type = [paddleseg.models.CrossEntropyLoss()]
        else:
            losses, coef = list(zip(*self.use_mixed_loss))
            if not set(losses).issubset(
                ['CrossEntropyLoss', 'DiceLoss', 'LovaszSoftmaxLoss']):
                raise ValueError(
                    "Only 'CrossEntropyLoss', 'DiceLoss', 'LovaszSoftmaxLoss' are supported."
                )
            losses = [getattr(paddleseg.models, loss)() for loss in losses]
            loss_type = [
                paddleseg.models.MixedLoss(
                    losses=losses, coef=list(coef))
            ]
        if self.model_name == 'FastSCNN':
            loss_type *= 2
            loss_coef = [1.0, 0.4]
        elif self.model_name == 'BiSeNetV2':
            loss_type *= 5
            loss_coef = [1.0] * 5
        else:
            loss_coef = [1.0]
        losses = {'types': loss_type, 'coef': loss_coef}
        return losses
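
    # For reference, the `use_mixed_loss` constructor argument accepts either a
    # bool or a list of (loss_name, coefficient) pairs; the sample values below
    # are illustrative only:
    #   use_mixed_loss=True
    #       -> MixedLoss(0.8 * CrossEntropyLoss + 0.2 * LovaszSoftmaxLoss)
    #   use_mixed_loss=[('CrossEntropyLoss', 0.8), ('DiceLoss', 0.2)]
    #       -> MixedLoss over the named paddleseg losses with the given weights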

    def default_optimizer(self,
                          parameters,
                          learning_rate,
                          num_epochs,
                          num_steps_each_epoch,
                          lr_decay_power=0.9):
        decay_step = num_epochs * num_steps_each_epoch
        lr_scheduler = paddle.optimizer.lr.PolynomialDecay(
            learning_rate, decay_step, end_lr=0, power=lr_decay_power)
        optimizer = paddle.optimizer.Momentum(
            learning_rate=lr_scheduler,
            parameters=parameters,
            momentum=0.9,
            weight_decay=4e-5)
        return optimizer
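
    # A custom optimizer can be passed to `train()` instead of this default; a
    # minimal sketch (the Adam settings below are illustrative, not a
    # recommendation):
    #   opt = paddle.optimizer.Adam(
    #       learning_rate=1e-4, parameters=model.net.parameters())
    #   model.train(..., optimizer=opt)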

    def train(self,
              num_epochs,
              train_dataset,
              train_batch_size=2,
              eval_dataset=None,
              optimizer=None,
              save_interval_epochs=1,
              log_interval_steps=2,
              save_dir='output',
              pretrain_weights='CITYSCAPES',
              learning_rate=0.01,
              lr_decay_power=0.9,
              early_stop=False,
              early_stop_patience=5,
              use_vdl=True,
              resume_checkpoint=None):
  210. """
  211. Train the model.
  212. Args:
  213. num_epochs(int): The number of epochs.
  214. train_dataset(paddlers.dataset): Training dataset.
  215. train_batch_size(int, optional): Total batch size among all cards used in training. Defaults to 2.
  216. eval_dataset(paddlers.dataset, optional):
  217. Evaluation dataset. If None, the model will not be evaluated furing training process. Defaults to None.
  218. optimizer(paddle.optimizer.Optimizer or None, optional):
  219. Optimizer used in training. If None, a default optimizer is used. Defaults to None.
  220. save_interval_epochs(int, optional): Epoch interval for saving the model. Defaults to 1.
  221. log_interval_steps(int, optional): Step interval for printing training information. Defaults to 10.
  222. save_dir(str, optional): Directory to save the model. Defaults to 'output'.
  223. pretrain_weights(str or None, optional):
  224. None or name/path of pretrained weights. If None, no pretrained weights will be loaded. Defaults to 'CITYSCAPES'.
  225. learning_rate(float, optional): Learning rate for training. Defaults to .025.
  226. lr_decay_power(float, optional): Learning decay power. Defaults to .9.
  227. early_stop(bool, optional): Whether to adopt early stop strategy. Defaults to False.
  228. early_stop_patience(int, optional): Early stop patience. Defaults to 5.
  229. use_vdl(bool, optional): Whether to use VisualDL to monitor the training process. Defaults to True.
  230. resume_checkpoint(str or None, optional): The path of the checkpoint to resume training from.
  231. If None, no training checkpoint will be resumed. At most one of `resume_checkpoint` and
  232. `pretrain_weights` can be set simultaneously. Defaults to None.
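
        Example:
            A minimal sketch; `train_ds` and `val_ds` stand in for prepared
            paddlers segmentation datasets:

                model = UNet(num_classes=2)
                model.train(
                    num_epochs=20,
                    train_dataset=train_ds,
                    train_batch_size=4,
                    eval_dataset=val_ds,
                    save_dir='output/unet')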
  233. """
        if self.status == 'Infer':
            logging.error(
                "Exported inference model does not support training.",
                exit=True)
        if pretrain_weights is not None and resume_checkpoint is not None:
            logging.error(
                "pretrain_weights and resume_checkpoint cannot be set simultaneously.",
                exit=True)
        self.labels = train_dataset.labels
        if self.losses is None:
            self.losses = self.default_loss()
        if optimizer is None:
            num_steps_each_epoch = train_dataset.num_samples // train_batch_size
            self.optimizer = self.default_optimizer(
                self.net.parameters(), learning_rate, num_epochs,
                num_steps_each_epoch, lr_decay_power)
        else:
            self.optimizer = optimizer
        if pretrain_weights is not None and not osp.exists(pretrain_weights):
            if pretrain_weights not in seg_pretrain_weights_dict[
                    self.model_name]:
                logging.warning(
                    "Path of pretrain_weights('{}') does not exist!".format(
                        pretrain_weights))
                logging.warning("pretrain_weights is forcibly set to '{}'. "
                                "If you don't want to use pretrained weights, "
                                "set pretrain_weights to None.".format(
                                    seg_pretrain_weights_dict[self.model_name][
                                        0]))
                pretrain_weights = seg_pretrain_weights_dict[self.model_name][
                    0]
        elif pretrain_weights is not None and osp.exists(pretrain_weights):
            if osp.splitext(pretrain_weights)[-1] != '.pdparams':
                logging.error(
                    "Invalid pretrained weights. Please specify a '.pdparams' file.",
                    exit=True)
        pretrained_dir = osp.join(save_dir, 'pretrain')
        is_backbone_weights = pretrain_weights == 'IMAGENET'
        self.net_initialize(
            pretrain_weights=pretrain_weights,
            save_dir=pretrained_dir,
            resume_checkpoint=resume_checkpoint,
            is_backbone_weights=is_backbone_weights)
        self.train_loop(
            num_epochs=num_epochs,
            train_dataset=train_dataset,
            train_batch_size=train_batch_size,
            eval_dataset=eval_dataset,
            save_interval_epochs=save_interval_epochs,
            log_interval_steps=log_interval_steps,
            save_dir=save_dir,
            early_stop=early_stop,
            early_stop_patience=early_stop_patience,
            use_vdl=use_vdl)

    def quant_aware_train(self,
                          num_epochs,
                          train_dataset,
                          train_batch_size=2,
                          eval_dataset=None,
                          optimizer=None,
                          save_interval_epochs=1,
                          log_interval_steps=2,
                          save_dir='output',
                          learning_rate=0.0001,
                          lr_decay_power=0.9,
                          early_stop=False,
                          early_stop_patience=5,
                          use_vdl=True,
                          resume_checkpoint=None,
                          quant_config=None):
  303. """
  304. Quantization-aware training.
  305. Args:
  306. num_epochs(int): The number of epochs.
  307. train_dataset(paddlers.dataset): Training dataset.
  308. train_batch_size(int, optional): Total batch size among all cards used in training. Defaults to 2.
  309. eval_dataset(paddlers.dataset, optional):
  310. Evaluation dataset. If None, the model will not be evaluated furing training process. Defaults to None.
  311. optimizer(paddle.optimizer.Optimizer or None, optional):
  312. Optimizer used in training. If None, a default optimizer is used. Defaults to None.
  313. save_interval_epochs(int, optional): Epoch interval for saving the model. Defaults to 1.
  314. log_interval_steps(int, optional): Step interval for printing training information. Defaults to 10.
  315. save_dir(str, optional): Directory to save the model. Defaults to 'output'.
  316. learning_rate(float, optional): Learning rate for training. Defaults to .025.
  317. lr_decay_power(float, optional): Learning decay power. Defaults to .9.
  318. early_stop(bool, optional): Whether to adopt early stop strategy. Defaults to False.
  319. early_stop_patience(int, optional): Early stop patience. Defaults to 5.
  320. use_vdl(bool, optional): Whether to use VisualDL to monitor the training process. Defaults to True.
  321. quant_config(dict or None, optional): Quantization configuration. If None, a default rule of thumb
  322. configuration will be used. Defaults to None.
  323. resume_checkpoint(str or None, optional): The path of the checkpoint to resume quantization-aware training
  324. from. If None, no training checkpoint will be resumed. Defaults to None.
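
        Example:
            A sketch of quantization-aware fine-tuning of an already-trained
            model (`train_ds` is a placeholder dataset):

                model.quant_aware_train(
                    num_epochs=5,
                    train_dataset=train_ds,
                    learning_rate=0.0001,
                    save_dir='output/unet_quant')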
  325. """
        self._prepare_qat(quant_config)
        self.train(
            num_epochs=num_epochs,
            train_dataset=train_dataset,
            train_batch_size=train_batch_size,
            eval_dataset=eval_dataset,
            optimizer=optimizer,
            save_interval_epochs=save_interval_epochs,
            log_interval_steps=log_interval_steps,
            save_dir=save_dir,
            pretrain_weights=None,
            learning_rate=learning_rate,
            lr_decay_power=lr_decay_power,
            early_stop=early_stop,
            early_stop_patience=early_stop_patience,
            use_vdl=use_vdl,
            resume_checkpoint=resume_checkpoint)

    def evaluate(self, eval_dataset, batch_size=1, return_details=False):
        """
        Evaluate the model.

        Args:
            eval_dataset(paddlers.dataset): Evaluation dataset.
            batch_size(int, optional): Total batch size among all cards used for evaluation. Defaults to 1.
            return_details(bool, optional): Whether to return evaluation details. Defaults to False.

        Returns:
            collections.OrderedDict with key-value pairs:
                {"miou": `mean intersection over union`,
                 "category_iou": `category-wise mean intersection over union`,
                 "oacc": `overall accuracy`,
                 "category_acc": `category-wise accuracy`,
                 "kappa": `kappa coefficient`,
                 "category_F1-score": `F1 score`}.
  358. """
        arrange_transforms(
            model_type=self.model_type,
            transforms=eval_dataset.transforms,
            mode='eval')
        self.net.eval()
        nranks = paddle.distributed.get_world_size()
        local_rank = paddle.distributed.get_rank()
        if nranks > 1:
            # Initialize the parallel environment if this has not been done.
            if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
            ):
                paddle.distributed.init_parallel_env()

        batch_size_each_card = get_single_card_bs(batch_size)
        if batch_size_each_card > 1:
            batch_size_each_card = 1
            batch_size = batch_size_each_card * paddlers.env_info['num']
            logging.warning(
                "Segmenter only supports batch_size=1 for each gpu/cpu card " \
                "during evaluation, so batch_size " \
                "is forcibly set to {}.".format(batch_size))
        self.eval_data_loader = self.build_data_loader(
            eval_dataset, batch_size=batch_size, mode='eval')

        intersect_area_all = 0
        pred_area_all = 0
        label_area_all = 0
        conf_mat_all = []
        logging.info(
            "Start to evaluate (total_samples={}, total_steps={})...".format(
                eval_dataset.num_samples,
                math.ceil(eval_dataset.num_samples * 1.0 / batch_size)))
        with paddle.no_grad():
            for step, data in enumerate(self.eval_data_loader):
                data.append(eval_dataset.transforms.transforms)
                outputs = self.run(self.net, data, 'eval')
                pred_area = outputs['pred_area']
                label_area = outputs['label_area']
                intersect_area = outputs['intersect_area']
                conf_mat = outputs['conf_mat']

                # Gather from all ranks
                if nranks > 1:
                    intersect_area_list = []
                    pred_area_list = []
                    label_area_list = []
                    conf_mat_list = []
                    paddle.distributed.all_gather(intersect_area_list,
                                                  intersect_area)
                    paddle.distributed.all_gather(pred_area_list, pred_area)
                    paddle.distributed.all_gather(label_area_list, label_area)
                    paddle.distributed.all_gather(conf_mat_list, conf_mat)

                    # Some samples may have been evaluated twice; drop the
                    # duplicates in the last iteration.
                    if (step + 1) * nranks > len(eval_dataset):
                        valid = len(eval_dataset) - step * nranks
                        intersect_area_list = intersect_area_list[:valid]
                        pred_area_list = pred_area_list[:valid]
                        label_area_list = label_area_list[:valid]
                        conf_mat_list = conf_mat_list[:valid]

                    intersect_area_all += sum(intersect_area_list)
                    pred_area_all += sum(pred_area_list)
                    label_area_all += sum(label_area_list)
                    conf_mat_all.extend(conf_mat_list)
                else:
                    intersect_area_all = intersect_area_all + intersect_area
                    pred_area_all = pred_area_all + pred_area
                    label_area_all = label_area_all + label_area
                    conf_mat_all.append(conf_mat)
        class_iou, miou = paddleseg.utils.metrics.mean_iou(
            intersect_area_all, pred_area_all, label_area_all)
        # TODO: Confirm whether oacc or macc should be reported here.
        class_acc, oacc = paddleseg.utils.metrics.accuracy(intersect_area_all,
                                                           pred_area_all)
        kappa = paddleseg.utils.metrics.kappa(intersect_area_all,
                                              pred_area_all, label_area_all)
        category_f1score = metrics.f1_score(intersect_area_all, pred_area_all,
                                            label_area_all)

        eval_metrics = OrderedDict(
            zip([
                'miou', 'category_iou', 'oacc', 'category_acc', 'kappa',
                'category_F1-score'
            ], [miou, class_iou, oacc, class_acc, kappa, category_f1score]))

        if return_details:
            conf_mat = sum(conf_mat_all)
            eval_details = {'confusion_matrix': conf_mat.tolist()}
            return eval_metrics, eval_details
        return eval_metrics

    def predict(self, img_file, transforms=None):
        """
        Do inference.

        Args:
            img_file(List[np.ndarray or str], str or np.ndarray):
                Image path or decoded image data in BGR format, which can also
                be a list, meaning all images are to be predicted as a mini-batch.
            transforms(paddlers.transforms.Compose or None, optional):
                Transforms for inputs. If None, the transforms for the evaluation process will be used. Defaults to None.

        Returns:
            If img_file is a string or np.ndarray, the result is a dict with key-value pairs:
            {"label_map": `label map`, "score_map": `score map`}.
            If img_file is a list, the result is a list composed of dicts with the corresponding fields:
                label_map(np.ndarray): the predicted label map (HW)
                score_map(np.ndarray): the prediction score map (HWC)
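
        Example:
            A sketch; 'demo.tif' is a placeholder path and `model` is assumed
            to have its test transforms set (e.g. after loading a trained
            model):

                pred = model.predict('demo.tif')
                label_map = pred['label_map']  # (H, W) int32
                score_map = pred['score_map']  # (H, W, C) float32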
  459. """
        if transforms is None and not hasattr(self, 'test_transforms'):
            raise Exception(
                "transforms need to be defined, but they are currently None.")
        if transforms is None:
            transforms = self.test_transforms
        if isinstance(img_file, (str, np.ndarray)):
            images = [img_file]
        else:
            images = img_file
        batch_im, batch_origin_shape = self._preprocess(
            images, transforms, to_tensor=True)
        self.net.eval()
        data = (batch_im, batch_origin_shape, transforms.transforms)
        outputs = self.run(self.net, data, 'test')
        label_map_list = outputs['label_map']
        score_map_list = outputs['score_map']
        if isinstance(img_file, list):
            prediction = [{
                'label_map': l,
                'score_map': s
            } for l, s in zip(label_map_list, score_map_list)]
        else:
            prediction = {
                'label_map': label_map_list[0],
                'score_map': score_map_list[0]
            }
        return prediction

    def slider_predict(self,
                       img_file,
                       save_dir,
                       block_size,
                       overlap=36,
                       transforms=None):
        """
        Do inference using a sliding window.

        Args:
            img_file(str):
                Image path.
            save_dir(str):
                Directory where the output GeoTIFF file is saved.
            block_size(List[int], Tuple[int] or int):
                The size of the sliding window.
            overlap(List[int], Tuple[int] or int, optional):
                The overlap between two adjacent blocks. Defaults to 36.
            transforms(paddlers.transforms.Compose or None, optional):
                Transforms for inputs. If None, the transforms for the evaluation process will be used. Defaults to None.
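
        Example:
            A sketch with placeholder paths; 512x512-pixel blocks are
            predicted with a 36-pixel overlap and merged into one GeoTIFF:

                model.slider_predict(
                    'large_scene.tif',
                    save_dir='output/pred',
                    block_size=512,
                    overlap=36)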
  501. """
        try:
            from osgeo import gdal
        except ImportError:
            import gdal

        if isinstance(block_size, int):
            block_size = (block_size, block_size)
        elif isinstance(block_size, (tuple, list)) and len(block_size) == 2:
            block_size = tuple(block_size)
        else:
            raise ValueError(
                "`block_size` must be a tuple/list of length 2 or an integer.")
        if isinstance(overlap, int):
            overlap = (overlap, overlap)
        elif isinstance(overlap, (tuple, list)) and len(overlap) == 2:
            overlap = tuple(overlap)
        else:
            raise ValueError(
                "`overlap` must be a tuple/list of length 2 or an integer.")

        src_data = gdal.Open(img_file)
        width = src_data.RasterXSize
        height = src_data.RasterYSize
        bands = src_data.RasterCount

        driver = gdal.GetDriverByName("GTiff")
        file_name = osp.splitext(osp.normpath(img_file).split(os.sep)[
            -1])[0] + ".tif"
        if not osp.exists(save_dir):
            os.makedirs(save_dir)
        save_file = osp.join(save_dir, file_name)
        dst_data = driver.Create(save_file, width, height, 1, gdal.GDT_Byte)
        dst_data.SetGeoTransform(src_data.GetGeoTransform())
        dst_data.SetProjection(src_data.GetProjection())
        band = dst_data.GetRasterBand(1)
        # 255 marks pixels that have not been predicted yet.
        band.WriteArray(255 * np.ones((height, width), dtype="uint8"))

        step = np.array(block_size) - np.array(overlap)
        for yoff in range(0, height, step[1]):
            for xoff in range(0, width, step[0]):
                xsize, ysize = block_size
                if xoff + xsize > width:
                    xsize = int(width - xoff)
                if yoff + ysize > height:
                    ysize = int(height - yoff)
                im = src_data.ReadAsArray(
                    int(xoff), int(yoff), xsize, ysize).transpose((1, 2, 0))
                # Pad boundary blocks to the full block size.
                h, w = im.shape[:2]
                im_fill = np.zeros(
                    (block_size[1], block_size[0], bands), dtype=im.dtype)
                im_fill[:h, :w, :] = im
                # Predict the current block.
                pred = self.predict(im_fill,
                                    transforms)["label_map"].astype("uint8")
                # Merge with the overlapping region written by earlier blocks:
                # keep a pixel only where the new prediction agrees with the
                # old one or where nothing has been written yet.
                rd_block = band.ReadAsArray(int(xoff), int(yoff), xsize, ysize)
                mask = (rd_block == pred[:h, :w]) | (rd_block == 255)
                temp = pred[:h, :w].copy()
                temp[~mask] = 0
                band.WriteArray(temp, int(xoff), int(yoff))
                dst_data.FlushCache()
        dst_data = None
        print("GeoTiff saved in {}.".format(save_file))

    def _preprocess(self, images, transforms, to_tensor=True):
        arrange_transforms(
            model_type=self.model_type, transforms=transforms, mode='test')
        batch_im = list()
        batch_ori_shape = list()
        for im in images:
            sample = {'image': im}
            if isinstance(sample['image'], str):
                sample = ImgDecoder(to_rgb=False)(sample)
            ori_shape = sample['image'].shape[:2]
            im = transforms(sample)[0]
            batch_im.append(im)
            batch_ori_shape.append(ori_shape)
        if to_tensor:
            batch_im = paddle.to_tensor(batch_im)
        else:
            batch_im = np.asarray(batch_im)
        return batch_im, batch_ori_shape

    @staticmethod
    def get_transforms_shape_info(batch_ori_shape, transforms):
        batch_restore_list = list()
        for ori_shape in batch_ori_shape:
            restore_list = list()
            h, w = ori_shape[0], ori_shape[1]
            for op in transforms:
                if op.__class__.__name__ == 'Resize':
                    restore_list.append(('resize', (h, w)))
                    h, w = op.target_size
                elif op.__class__.__name__ == 'ResizeByShort':
                    restore_list.append(('resize', (h, w)))
                    im_short_size = min(h, w)
                    im_long_size = max(h, w)
                    scale = float(op.short_size) / float(im_short_size)
                    if 0 < op.max_size < np.round(scale * im_long_size):
                        scale = float(op.max_size) / float(im_long_size)
                    h = int(round(h * scale))
                    w = int(round(w * scale))
                elif op.__class__.__name__ == 'ResizeByLong':
                    restore_list.append(('resize', (h, w)))
                    im_long_size = max(h, w)
                    scale = float(op.long_size) / float(im_long_size)
                    h = int(round(h * scale))
                    w = int(round(w * scale))
                elif op.__class__.__name__ == 'Padding':
                    if op.target_size:
                        target_h, target_w = op.target_size
                    else:
                        target_h = int(
                            (np.ceil(h / op.size_divisor) * op.size_divisor))
                        target_w = int(
                            (np.ceil(w / op.size_divisor) * op.size_divisor))
                    if op.pad_mode == -1:
                        offsets = op.offsets
                    elif op.pad_mode == 0:
                        offsets = [0, 0]
                    elif op.pad_mode == 1:
                        offsets = [(target_h - h) // 2, (target_w - w) // 2]
                    else:
                        offsets = [target_h - h, target_w - w]
                    restore_list.append(('padding', (h, w), offsets))
                    h, w = target_h, target_w
            batch_restore_list.append(restore_list)
        return batch_restore_list
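
    # For reference, each restore_list entry records the shape to restore to
    # (and, for padding, the top-left offsets). An illustrative value for an
    # image resized from 500x400 to 512x512 and then zero-offset padded to
    # 640x640:
    #   [('resize', (500, 400)), ('padding', (512, 512), [0, 0])]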

    def _postprocess(self, batch_pred, batch_origin_shape, transforms):
        batch_restore_list = BaseSegmenter.get_transforms_shape_info(
            batch_origin_shape, transforms)
        if isinstance(batch_pred, (tuple, list)) and self.status == 'Infer':
            return self._infer_postprocess(
                batch_label_map=batch_pred[0],
                batch_score_map=batch_pred[1],
                batch_restore_list=batch_restore_list)
        results = []
        if batch_pred.dtype == paddle.float32:
            mode = 'bilinear'
        else:
            mode = 'nearest'
        for pred, restore_list in zip(batch_pred, batch_restore_list):
            pred = paddle.unsqueeze(pred, axis=0)
            for item in restore_list[::-1]:
                h, w = item[1][0], item[1][1]
                if item[0] == 'resize':
                    pred = F.interpolate(
                        pred, (h, w), mode=mode, data_format='NCHW')
                elif item[0] == 'padding':
                    x, y = item[2]
                    pred = pred[:, :, y:y + h, x:x + w]
                else:
                    pass
            results.append(pred)
        return results

    def _infer_postprocess(self, batch_label_map, batch_score_map,
                           batch_restore_list):
        label_maps = []
        score_maps = []
        for label_map, score_map, restore_list in zip(
                batch_label_map, batch_score_map, batch_restore_list):
            if not isinstance(label_map, np.ndarray):
                label_map = paddle.unsqueeze(label_map, axis=[0, 3])
                score_map = paddle.unsqueeze(score_map, axis=0)
            for item in restore_list[::-1]:
                h, w = item[1][0], item[1][1]
                if item[0] == 'resize':
                    if isinstance(label_map, np.ndarray):
                        label_map = cv2.resize(
                            label_map, (w, h),
                            interpolation=cv2.INTER_NEAREST)
                        score_map = cv2.resize(
                            score_map, (w, h), interpolation=cv2.INTER_LINEAR)
                    else:
                        label_map = F.interpolate(
                            label_map, (h, w),
                            mode='nearest',
                            data_format='NHWC')
                        score_map = F.interpolate(
                            score_map, (h, w),
                            mode='bilinear',
                            data_format='NHWC')
                elif item[0] == 'padding':
                    x, y = item[2]
                    if isinstance(label_map, np.ndarray):
                        label_map = label_map[..., y:y + h, x:x + w]
                        score_map = score_map[..., y:y + h, x:x + w]
                    else:
                        label_map = label_map[:, :, y:y + h, x:x + w]
                        score_map = score_map[:, :, y:y + h, x:x + w]
                else:
                    pass
            label_map = label_map.squeeze()
            score_map = score_map.squeeze()
            if not isinstance(label_map, np.ndarray):
                label_map = label_map.numpy()
                score_map = score_map.numpy()
            label_maps.append(label_map.squeeze())
            score_maps.append(score_map.squeeze())
        return label_maps, score_maps


class UNet(BaseSegmenter):
    def __init__(self,
                 input_channel=3,
                 num_classes=2,
                 use_mixed_loss=False,
                 use_deconv=False,
                 align_corners=False,
                 **params):
        params.update({
            'use_deconv': use_deconv,
            'align_corners': align_corners
        })
        super(UNet, self).__init__(
            model_name='UNet',
            input_channel=input_channel,
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            **params)


class DeepLabV3P(BaseSegmenter):
    def __init__(self,
                 input_channel=3,
                 num_classes=2,
                 backbone='ResNet50_vd',
                 use_mixed_loss=False,
                 output_stride=8,
                 backbone_indices=(0, 3),
                 aspp_ratios=(1, 12, 24, 36),
                 aspp_out_channels=256,
                 align_corners=False,
                 **params):
        self.backbone_name = backbone
        if backbone not in ['ResNet50_vd', 'ResNet101_vd']:
            raise ValueError(
                "backbone: {} is not supported. Please choose one of "
                "('ResNet50_vd', 'ResNet101_vd').".format(backbone))
        if params.get('with_net', True):
            with DisablePrint():
                backbone = getattr(paddleseg.models, backbone)(
                    input_channel=input_channel, output_stride=output_stride)
        else:
            backbone = None
        params.update({
            'backbone': backbone,
            'backbone_indices': backbone_indices,
            'aspp_ratios': aspp_ratios,
            'aspp_out_channels': aspp_out_channels,
            'align_corners': align_corners
        })
        super(DeepLabV3P, self).__init__(
            model_name='DeepLabV3P',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            **params)


class FastSCNN(BaseSegmenter):
    def __init__(self,
                 num_classes=2,
                 use_mixed_loss=False,
                 align_corners=False,
                 **params):
        params.update({'align_corners': align_corners})
        super(FastSCNN, self).__init__(
            model_name='FastSCNN',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            **params)


class HRNet(BaseSegmenter):
    def __init__(self,
                 num_classes=2,
                 width=48,
                 use_mixed_loss=False,
                 align_corners=False,
                 **params):
        if width not in (18, 48):
            raise ValueError(
                "width={} is not supported, please choose from [18, 48].".
                format(width))
        self.backbone_name = 'HRNet_W{}'.format(width)
        if params.get('with_net', True):
            with DisablePrint():
                backbone = getattr(paddleseg.models, self.backbone_name)(
                    align_corners=align_corners)
        else:
            backbone = None
        params.update({'backbone': backbone, 'align_corners': align_corners})
        # The underlying network is an FCN head on an HRNet backbone; the
        # trainer is still reported as 'HRNet'.
        super(HRNet, self).__init__(
            model_name='FCN',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            **params)
        self.model_name = 'HRNet'


class BiSeNetV2(BaseSegmenter):
    def __init__(self,
                 num_classes=2,
                 use_mixed_loss=False,
                 align_corners=False,
                 **params):
        params.update({'align_corners': align_corners})
        super(BiSeNetV2, self).__init__(
            model_name='BiSeNetV2',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            **params)


class FarSeg(BaseSegmenter):
    def __init__(self, num_classes=2, use_mixed_loss=False, **params):
        super(FarSeg, self).__init__(
            model_name='FarSeg',
            num_classes=num_classes,
            use_mixed_loss=use_mixed_loss,
            **params)
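

# A minimal end-to-end usage sketch (illustrative only; 'demo.tif' and the
# transform pipeline below are placeholders, not shipped with this module):
#
#     from paddlers.transforms import Compose, Resize, Normalize
#
#     model = UNet(num_classes=2)
#     model.test_transforms = Compose([Resize(target_size=512), Normalize()])
#     pred = model.predict('demo.tif')
#     print(pred['label_map'].shape, pred['score_map'].shape)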