@@ -72,8 +72,8 @@ def classification_eval(engine, epoch_id=0):
                     for key in loss_dict:
                         if key not in output_info:
                             output_info[key] = AverageMeter(key, '7.5f')
-                        output_info[key].update(loss_dict[key].numpy()[0],
-                                                batch_size)
+                        output_info[key].update(
+                            float(loss_dict[key]), batch_size)
         else:
             out = engine.model(batch[0])
             # calc loss
@@ -82,8 +82,7 @@ def classification_eval(engine, epoch_id=0):
                 for key in loss_dict:
                     if key not in output_info:
                         output_info[key] = AverageMeter(key, '7.5f')
-                    output_info[key].update(loss_dict[key].numpy()[0],
-                                            batch_size)
+                    output_info[key].update(float(loss_dict[key]), batch_size)
 
         # just for DistributedBatchSampler issue: repeat sampling
         current_samples = batch_size * paddle.distributed.get_world_size()
@@ -132,8 +131,8 @@ def classification_eval(engine, epoch_id=0):
                 if key not in output_info:
                     output_info[key] = AverageMeter(key, '7.5f')
 
-                output_info[key].update(metric_dict[key].numpy()[0],
-                                        current_samples)
+                output_info[key].update(
+                    float(metric_dict[key]), current_samples)
 
         time_info["batch_cost"].update(time.time() - tic)
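
All three hunks replace the readout pattern tensor.numpy()[0] with float(tensor). A minimal sketch of the motivation, assuming PaddlePaddle >= 2.5 (where reductions such as paddle.mean return 0-D tensors) and illustrative tensor values:

    import paddle

    # In Paddle >= 2.5, reducing a tensor produces a 0-D tensor (shape []).
    loss = paddle.mean(paddle.to_tensor([0.3, 0.7]))

    # float() converts both a 0-D tensor and the older shape-[1] tensor,
    # so the new readout works across Paddle versions.
    print(float(loss))  # 0.5

    # The removed pattern fails on 0-D tensors: .numpy() yields a 0-D
    # ndarray, and indexing it with [0] raises IndexError.
    # print(loss.numpy()[0])

The same reasoning applies to the metric readout in the third hunk, where metric_dict[key] is likewise a scalar tensor.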