# classifier.py — source reconstructed from a numbered code listing.
  1. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import math
  15. import os.path as osp
  16. from collections import OrderedDict
  17. from operator import itemgetter
  18. import numpy as np
  19. import paddle
  20. import paddle.nn.functional as F
  21. from paddle.static import InputSpec
  22. import paddlers
  23. import paddlers.models.ppcls as ppcls
  24. import paddlers.rs_models.clas as cmcls
  25. import paddlers.utils.logging as logging
  26. from paddlers.utils import get_single_card_bs, DisablePrint
  27. from paddlers.models.ppcls.metric import build_metrics
  28. from paddlers.models import clas_losses
  29. from paddlers.models.ppcls.data.postprocess import build_postprocess
  30. from paddlers.utils.checkpoint import cls_pretrain_weights_dict
  31. from paddlers.transforms import Resize, decode_image
  32. from .base import BaseModel
  33. __all__ = [
  34. "ResNet50_vd", "MobileNetV3_small_x1_0", "HRNet_W18_C", "CondenseNetV2_b"
  35. ]
  36. class BaseClassifier(BaseModel):
  37. def __init__(self,
  38. model_name,
  39. in_channels=3,
  40. num_classes=2,
  41. use_mixed_loss=False,
  42. losses=None,
  43. **params):
  44. self.init_params = locals()
  45. if 'with_net' in self.init_params:
  46. del self.init_params['with_net']
  47. super(BaseClassifier, self).__init__('classifier')
  48. if not hasattr(ppcls.arch.backbone, model_name) and \
  49. not hasattr(cmcls, model_name):
  50. raise ValueError("ERROR: There is no model named {}.".format(
  51. model_name))
  52. self.model_name = model_name
  53. self.in_channels = in_channels
  54. self.num_classes = num_classes
  55. self.use_mixed_loss = use_mixed_loss
  56. self.metrics = None
  57. self.losses = losses
  58. self.labels = None
  59. self.postprocess = None
  60. if params.get('with_net', True):
  61. params.pop('with_net', None)
  62. self.net = self.build_net(**params)
  63. self.find_unused_parameters = True
  64. def build_net(self, **params):
  65. with paddle.utils.unique_name.guard():
  66. model = dict(ppcls.arch.backbone.__dict__,
  67. **cmcls.__dict__)[self.model_name]
  68. # TODO: Determine whether there is in_channels
  69. try:
  70. net = model(
  71. class_num=self.num_classes,
  72. in_channels=self.in_channels,
  73. **params)
  74. except:
  75. net = model(class_num=self.num_classes, **params)
  76. self.in_channels = 3
  77. return net
  78. def _fix_transforms_shape(self, image_shape):
  79. if hasattr(self, 'test_transforms'):
  80. if self.test_transforms is not None:
  81. has_resize_op = False
  82. resize_op_idx = -1
  83. normalize_op_idx = len(self.test_transforms.transforms)
  84. for idx, op in enumerate(self.test_transforms.transforms):
  85. name = op.__class__.__name__
  86. if name == 'Normalize':
  87. normalize_op_idx = idx
  88. if 'Resize' in name:
  89. has_resize_op = True
  90. resize_op_idx = idx
  91. if not has_resize_op:
  92. self.test_transforms.transforms.insert(
  93. normalize_op_idx, Resize(target_size=image_shape))
  94. else:
  95. self.test_transforms.transforms[resize_op_idx] = Resize(
  96. target_size=image_shape)
  97. def _get_test_inputs(self, image_shape):
  98. if image_shape is not None:
  99. if len(image_shape) == 2:
  100. image_shape = [1, 3] + image_shape
  101. self._fix_transforms_shape(image_shape[-2:])
  102. else:
  103. image_shape = [None, 3, -1, -1]
  104. self.fixed_input_shape = image_shape
  105. input_spec = [
  106. InputSpec(
  107. shape=image_shape, name='image', dtype='float32')
  108. ]
  109. return input_spec
  110. def run(self, net, inputs, mode):
  111. net_out = net(inputs[0])
  112. if mode == 'test':
  113. return self.postprocess(net_out)
  114. outputs = OrderedDict()
  115. label = paddle.to_tensor(inputs[1], dtype="int64")
  116. if mode == 'eval':
  117. label = paddle.unsqueeze(label, axis=-1)
  118. metric_dict = self.metrics(net_out, label)
  119. outputs['top1'] = metric_dict["top1"]
  120. outputs['top5'] = metric_dict["top5"]
  121. if mode == 'train':
  122. loss_list = self.losses(net_out, label)
  123. outputs['loss'] = loss_list['loss']
  124. return outputs
  125. def default_metric(self):
  126. default_config = [{"TopkAcc": {"topk": [1, 5]}}]
  127. return build_metrics(default_config)
  128. def default_loss(self):
  129. # TODO: use mixed loss and other loss
  130. default_config = [{"CELoss": {"weight": 1.0}}]
  131. return clas_losses.build_loss(default_config)
  132. def default_optimizer(self,
  133. parameters,
  134. learning_rate,
  135. num_epochs,
  136. num_steps_each_epoch,
  137. last_epoch=-1,
  138. L2_coeff=0.00007):
  139. decay_step = num_epochs * num_steps_each_epoch
  140. lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
  141. learning_rate, T_max=decay_step, eta_min=0, last_epoch=last_epoch)
  142. optimizer = paddle.optimizer.Momentum(
  143. learning_rate=lr_scheduler,
  144. parameters=parameters,
  145. momentum=0.9,
  146. weight_decay=paddle.regularizer.L2Decay(L2_coeff))
  147. return optimizer
  148. def default_postprocess(self, class_id_map_file):
  149. default_config = {
  150. "name": "Topk",
  151. "topk": 1,
  152. "class_id_map_file": class_id_map_file
  153. }
  154. return build_postprocess(default_config)
  155. def build_postprocess_from_labels(self, topk=1):
  156. label_dict = dict()
  157. for i, label in enumerate(self.labels):
  158. label_dict[i] = label
  159. self.postprocess = build_postprocess({
  160. "name": "Topk",
  161. "topk": topk,
  162. "class_id_map_file": None
  163. })
  164. # Add class_id_map from model.yml
  165. self.postprocess.class_id_map = label_dict
  166. def train(self,
  167. num_epochs,
  168. train_dataset,
  169. train_batch_size=2,
  170. eval_dataset=None,
  171. optimizer=None,
  172. save_interval_epochs=1,
  173. log_interval_steps=2,
  174. save_dir='output',
  175. pretrain_weights='IMAGENET',
  176. learning_rate=0.1,
  177. lr_decay_power=0.9,
  178. early_stop=False,
  179. early_stop_patience=5,
  180. use_vdl=True,
  181. resume_checkpoint=None):
  182. """
  183. Train the model.
  184. Args:
  185. num_epochs (int): Number of epochs.
  186. train_dataset (paddlers.datasets.ClasDataset): Training dataset.
  187. train_batch_size (int, optional): Total batch size among all cards used in
  188. training. Defaults to 2.
  189. eval_dataset (paddlers.datasets.ClasDataset|None, optional): Evaluation dataset.
  190. If None, the model will not be evaluated during training process.
  191. Defaults to None.
  192. optimizer (paddle.optimizer.Optimizer|None, optional): Optimizer used in
  193. training. If None, a default optimizer will be used. Defaults to None.
  194. save_interval_epochs (int, optional): Epoch interval for saving the model.
  195. Defaults to 1.
  196. log_interval_steps (int, optional): Step interval for printing training
  197. information. Defaults to 2.
  198. save_dir (str, optional): Directory to save the model. Defaults to 'output'.
  199. pretrain_weights (str|None, optional): None or name/path of pretrained
  200. weights. If None, no pretrained weights will be loaded.
  201. Defaults to 'IMAGENET'.
  202. learning_rate (float, optional): Learning rate for training.
  203. Defaults to .1.
  204. lr_decay_power (float, optional): Learning decay power. Defaults to .9.
  205. early_stop (bool, optional): Whether to adopt early stop strategy.
  206. Defaults to False.
  207. early_stop_patience (int, optional): Early stop patience. Defaults to 5.
  208. use_vdl (bool, optional): Whether to use VisualDL to monitor the training
  209. process. Defaults to True.
  210. resume_checkpoint (str|None, optional): Path of the checkpoint to resume
  211. training from. If None, no training checkpoint will be resumed. At most
  212. Aone of `resume_checkpoint` and `pretrain_weights` can be set simultaneously.
  213. Defaults to None.
  214. """
  215. if self.status == 'Infer':
  216. logging.error(
  217. "Exported inference model does not support training.",
  218. exit=True)
  219. if pretrain_weights is not None and resume_checkpoint is not None:
  220. logging.error(
  221. "pretrain_weights and resume_checkpoint cannot be set simultaneously.",
  222. exit=True)
  223. self.labels = train_dataset.labels
  224. if self.losses is None:
  225. self.losses = self.default_loss()
  226. self.metrics = self.default_metric()
  227. self.postprocess = self.default_postprocess(train_dataset.label_list)
  228. if optimizer is None:
  229. num_steps_each_epoch = train_dataset.num_samples // train_batch_size
  230. self.optimizer = self.default_optimizer(
  231. self.net.parameters(), learning_rate, num_epochs,
  232. num_steps_each_epoch, lr_decay_power)
  233. else:
  234. self.optimizer = optimizer
  235. if pretrain_weights is not None and not osp.exists(pretrain_weights):
  236. if pretrain_weights not in cls_pretrain_weights_dict[
  237. self.model_name]:
  238. logging.warning(
  239. "Path of pretrain_weights('{}') does not exist!".format(
  240. pretrain_weights))
  241. logging.warning("Pretrain_weights is forcibly set to '{}'. "
  242. "If don't want to use pretrain weights, "
  243. "set pretrain_weights to be None.".format(
  244. cls_pretrain_weights_dict[self.model_name][
  245. 0]))
  246. pretrain_weights = cls_pretrain_weights_dict[self.model_name][0]
  247. elif pretrain_weights is not None and osp.exists(pretrain_weights):
  248. if osp.splitext(pretrain_weights)[-1] != '.pdparams':
  249. logging.error(
  250. "Invalid pretrain weights. Please specify a '.pdparams' file.",
  251. exit=True)
  252. pretrained_dir = osp.join(save_dir, 'pretrain')
  253. is_backbone_weights = False # pretrain_weights == 'IMAGENET' # TODO: this is backbone
  254. self.net_initialize(
  255. pretrain_weights=pretrain_weights,
  256. save_dir=pretrained_dir,
  257. resume_checkpoint=resume_checkpoint,
  258. is_backbone_weights=is_backbone_weights)
  259. self.train_loop(
  260. num_epochs=num_epochs,
  261. train_dataset=train_dataset,
  262. train_batch_size=train_batch_size,
  263. eval_dataset=eval_dataset,
  264. save_interval_epochs=save_interval_epochs,
  265. log_interval_steps=log_interval_steps,
  266. save_dir=save_dir,
  267. early_stop=early_stop,
  268. early_stop_patience=early_stop_patience,
  269. use_vdl=use_vdl)
    def quant_aware_train(self,
                          num_epochs,
                          train_dataset,
                          train_batch_size=2,
                          eval_dataset=None,
                          optimizer=None,
                          save_interval_epochs=1,
                          log_interval_steps=2,
                          save_dir='output',
                          learning_rate=0.0001,
                          lr_decay_power=0.9,
                          early_stop=False,
                          early_stop_patience=5,
                          use_vdl=True,
                          resume_checkpoint=None,
                          quant_config=None):
        """
        Quantization-aware training.

        Prepares the network for QAT via `_prepare_qat` and then delegates to
        `train` with `pretrain_weights=None` (QAT continues from the current
        weights rather than loading pretrained ones).

        Args:
            num_epochs (int): Number of epochs.
            train_dataset (paddlers.datasets.ClasDataset): Training dataset.
            train_batch_size (int, optional): Total batch size among all cards used in
                training. Defaults to 2.
            eval_dataset (paddlers.datasets.ClasDataset|None, optional): Evaluation dataset.
                If None, the model will not be evaluated during training process.
                Defaults to None.
            optimizer (paddle.optimizer.Optimizer|None, optional): Optimizer used in
                training. If None, a default optimizer will be used. Defaults to None.
            save_interval_epochs (int, optional): Epoch interval for saving the model.
                Defaults to 1.
            log_interval_steps (int, optional): Step interval for printing training
                information. Defaults to 2.
            save_dir (str, optional): Directory to save the model. Defaults to 'output'.
            learning_rate (float, optional): Learning rate for training.
                Defaults to .0001.
            lr_decay_power (float, optional): Learning decay power. Defaults to .9.
            early_stop (bool, optional): Whether to adopt early stop strategy.
                Defaults to False.
            early_stop_patience (int, optional): Early stop patience. Defaults to 5.
            use_vdl (bool, optional): Whether to use VisualDL to monitor the training
                process. Defaults to True.
            quant_config (dict|None, optional): Quantization configuration. If None,
                a default rule of thumb configuration will be used. Defaults to None.
            resume_checkpoint (str|None, optional): Path of the checkpoint to resume
                quantization-aware training from. If None, no training checkpoint will
                be resumed. Defaults to None.
        """
        self._prepare_qat(quant_config)
        self.train(
            num_epochs=num_epochs,
            train_dataset=train_dataset,
            train_batch_size=train_batch_size,
            eval_dataset=eval_dataset,
            optimizer=optimizer,
            save_interval_epochs=save_interval_epochs,
            log_interval_steps=log_interval_steps,
            save_dir=save_dir,
            pretrain_weights=None,
            learning_rate=learning_rate,
            lr_decay_power=lr_decay_power,
            early_stop=early_stop,
            early_stop_patience=early_stop_patience,
            use_vdl=use_vdl,
            resume_checkpoint=resume_checkpoint)
  334. def evaluate(self, eval_dataset, batch_size=1, return_details=False):
  335. """
  336. Evaluate the model.
  337. Args:
  338. eval_dataset (paddlers.datasets.ClasDataset): Evaluation dataset.
  339. batch_size (int, optional): Total batch size among all cards used for
  340. evaluation. Defaults to 1.
  341. return_details (bool, optional): Whether to return evaluation details.
  342. Defaults to False.
  343. Returns:
  344. collections.OrderedDict with key-value pairs:
  345. {"top1": `acc of top1`,
  346. "top5": `acc of top5`}.
  347. """
  348. self._check_transforms(eval_dataset.transforms, 'eval')
  349. self.net.eval()
  350. nranks = paddle.distributed.get_world_size()
  351. local_rank = paddle.distributed.get_rank()
  352. if nranks > 1:
  353. # Initialize parallel environment if not done.
  354. if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
  355. ):
  356. paddle.distributed.init_parallel_env()
  357. batch_size_each_card = get_single_card_bs(batch_size)
  358. if batch_size_each_card > 1:
  359. batch_size_each_card = 1
  360. batch_size = batch_size_each_card * paddlers.env_info['num']
  361. logging.warning(
  362. "Classifier only supports batch_size=1 for each gpu/cpu card " \
  363. "during evaluation, so batch_size " \
  364. "is forcibly set to {}.".format(batch_size))
  365. self.eval_data_loader = self.build_data_loader(
  366. eval_dataset, batch_size=batch_size, mode='eval')
  367. logging.info(
  368. "Start to evaluate(total_samples={}, total_steps={})...".format(
  369. eval_dataset.num_samples,
  370. math.ceil(eval_dataset.num_samples * 1.0 / batch_size)))
  371. top1s = []
  372. top5s = []
  373. with paddle.no_grad():
  374. for step, data in enumerate(self.eval_data_loader):
  375. data.append(eval_dataset.transforms.transforms)
  376. outputs = self.run(self.net, data, 'eval')
  377. top1s.append(outputs["top1"])
  378. top5s.append(outputs["top5"])
  379. top1 = np.mean(top1s)
  380. top5 = np.mean(top5s)
  381. eval_metrics = OrderedDict(zip(['top1', 'top5'], [top1, top5]))
  382. if return_details:
  383. # TODO: add details
  384. return eval_metrics, None
  385. return eval_metrics
  386. def predict(self, img_file, transforms=None):
  387. """
  388. Do inference.
  389. Args:
  390. img_file (list[np.ndarray|str] | str | np.ndarray): Image path or decoded
  391. image data, which also could constitute a list, meaning all images to be
  392. predicted as a mini-batch.
  393. transforms (paddlers.transforms.Compose|None, optional): Transforms for
  394. inputs. If None, the transforms for evaluation process will be used.
  395. Defaults to None.
  396. Returns:
  397. If `img_file` is a string or np.array, the result is a dict with key-value
  398. pairs:
  399. {"label map": `class_ids_map`,
  400. "scores_map": `scores_map`,
  401. "label_names_map": `label_names_map`}.
  402. If `img_file` is a list, the result is a list composed of dicts with the
  403. corresponding fields:
  404. class_ids_map (np.ndarray): class_ids
  405. scores_map (np.ndarray): scores
  406. label_names_map (np.ndarray): label_names
  407. """
  408. if transforms is None and not hasattr(self, 'test_transforms'):
  409. raise ValueError("transforms need to be defined, now is None.")
  410. if transforms is None:
  411. transforms = self.test_transforms
  412. if isinstance(img_file, (str, np.ndarray)):
  413. images = [img_file]
  414. else:
  415. images = img_file
  416. batch_im, batch_origin_shape = self.preprocess(images, transforms,
  417. self.model_type)
  418. self.net.eval()
  419. data = (batch_im, batch_origin_shape, transforms.transforms)
  420. if self.postprocess is None:
  421. self.build_postprocess_from_labels()
  422. outputs = self.run(self.net, data, 'test')
  423. class_ids = map(itemgetter('class_ids'), outputs)
  424. scores = map(itemgetter('scores'), outputs)
  425. label_names = map(itemgetter('label_names'), outputs)
  426. if isinstance(img_file, list):
  427. prediction = [{
  428. 'class_ids_map': l,
  429. 'scores_map': s,
  430. 'label_names_map': n,
  431. } for l, s, n in zip(class_ids, scores, label_names)]
  432. else:
  433. prediction = {
  434. 'class_ids_map': next(class_ids),
  435. 'scores_map': next(scores),
  436. 'label_names_map': next(label_names)
  437. }
  438. return prediction
  439. def preprocess(self, images, transforms, to_tensor=True):
  440. self._check_transforms(transforms, 'test')
  441. batch_im = list()
  442. batch_ori_shape = list()
  443. for im in images:
  444. if isinstance(im, str):
  445. im = decode_image(im, to_rgb=False)
  446. ori_shape = im.shape[:2]
  447. sample = {'image': im}
  448. im = transforms(sample)
  449. batch_im.append(im)
  450. batch_ori_shape.append(ori_shape)
  451. if to_tensor:
  452. batch_im = paddle.to_tensor(batch_im)
  453. else:
  454. batch_im = np.asarray(batch_im)
  455. return batch_im, batch_ori_shape
    @staticmethod
    def get_transforms_shape_info(batch_ori_shape, transforms):
        """Replay resize/pad ops to record how to undo them per sample.

        Args:
            batch_ori_shape: Per-sample original (h, w) shapes.
            transforms: Sequence of transform op objects that were applied.

        Returns:
            list: For each sample, a list of ('resize', (h, w)) /
                ('padding', (h, w), offsets) steps, in application order,
                where (h, w) is the shape *before* that op was applied.
        """
        batch_restore_list = list()
        for ori_shape in batch_ori_shape:
            restore_list = list()
            h, w = ori_shape[0], ori_shape[1]
            for op in transforms:
                if op.__class__.__name__ == 'Resize':
                    restore_list.append(('resize', (h, w)))
                    h, w = op.target_size
                elif op.__class__.__name__ == 'ResizeByShort':
                    restore_list.append(('resize', (h, w)))
                    im_short_size = min(h, w)
                    im_long_size = max(h, w)
                    scale = float(op.short_size) / float(im_short_size)
                    # If scaling the short side would push the long side past
                    # max_size (0 meaning "no cap"), rescale to cap it instead.
                    if 0 < op.max_size < np.round(scale * im_long_size):
                        scale = float(op.max_size) / float(im_long_size)
                    h = int(round(h * scale))
                    w = int(round(w * scale))
                elif op.__class__.__name__ == 'ResizeByLong':
                    restore_list.append(('resize', (h, w)))
                    im_long_size = max(h, w)
                    scale = float(op.long_size) / float(im_long_size)
                    h = int(round(h * scale))
                    w = int(round(w * scale))
                elif op.__class__.__name__ == 'Pad':
                    if op.target_size:
                        target_h, target_w = op.target_size
                    else:
                        # No explicit target: pad up to the next multiple of
                        # size_divisor.
                        target_h = int(
                            (np.ceil(h / op.size_divisor) * op.size_divisor))
                        target_w = int(
                            (np.ceil(w / op.size_divisor) * op.size_divisor))
                    # pad_mode: -1 explicit offsets, 0 top-left, 1 centered,
                    # otherwise bottom-right.
                    if op.pad_mode == -1:
                        offsets = op.offsets
                    elif op.pad_mode == 0:
                        offsets = [0, 0]
                    elif op.pad_mode == 1:
                        offsets = [(target_h - h) // 2, (target_w - w) // 2]
                    else:
                        offsets = [target_h - h, target_w - w]
                    restore_list.append(('padding', (h, w), offsets))
                    h, w = target_h, target_w
            batch_restore_list.append(restore_list)
        return batch_restore_list
    def _check_transforms(self, transforms, mode):
        """Validate transforms; additionally require an `ArrangeClassifier`
        arrange op on top of the base-class checks."""
        super()._check_transforms(transforms, mode)
        if not isinstance(transforms.arrange,
                          paddlers.transforms.ArrangeClassifier):
            raise TypeError(
                "`transforms.arrange` must be an ArrangeClassifier object.")
  507. class ResNet50_vd(BaseClassifier):
  508. def __init__(self,
  509. num_classes=2,
  510. use_mixed_loss=False,
  511. losses=None,
  512. **params):
  513. super(ResNet50_vd, self).__init__(
  514. model_name='ResNet50_vd',
  515. num_classes=num_classes,
  516. use_mixed_loss=use_mixed_loss,
  517. losses=losses,
  518. **params)
  519. class MobileNetV3_small_x1_0(BaseClassifier):
  520. def __init__(self,
  521. num_classes=2,
  522. use_mixed_loss=False,
  523. losses=None,
  524. **params):
  525. super(MobileNetV3_small_x1_0, self).__init__(
  526. model_name='MobileNetV3_small_x1_0',
  527. num_classes=num_classes,
  528. use_mixed_loss=use_mixed_loss,
  529. losses=losses,
  530. **params)
  531. class HRNet_W18_C(BaseClassifier):
  532. def __init__(self,
  533. num_classes=2,
  534. use_mixed_loss=False,
  535. losses=None,
  536. **params):
  537. super(HRNet_W18_C, self).__init__(
  538. model_name='HRNet_W18_C',
  539. num_classes=num_classes,
  540. use_mixed_loss=use_mixed_loss,
  541. losses=losses,
  542. **params)
  543. class CondenseNetV2_b(BaseClassifier):
  544. def __init__(self,
  545. num_classes=2,
  546. use_mixed_loss=False,
  547. losses=None,
  548. **params):
  549. super(CondenseNetV2_b, self).__init__(
  550. model_name='CondenseNetV2_b',
  551. num_classes=num_classes,
  552. use_mixed_loss=use_mixed_loss,
  553. losses=losses,
  554. **params)