json_Split.py

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. """
  15. @File Description:
  16. # json数据集划分,可以通过val_split_rate、val_split_num控制划分比例或个数, keep_val_inTrain可以设定是否在train中保留val相关信息
  17. python ./coco_tools/json_Split.py \
  18. --json_all_path=./annotations/instances_val2017.json \
  19. --json_train_path=./instances_val2017_train.json \
  20. --json_val_path=./instances_val2017_val.json
  21. """

import json
import argparse

import pandas as pd


def get_annno(df_img_split, df_anno):
    """Return the annotation rows that belong to the given image subset."""
    df_merge = pd.merge(
        df_img_split, df_anno, on="image_id", suffixes=(None, '_r'))
    # Drop the suffixed duplicate columns introduced by the merge, keep only
    # the original annotation columns, and sort by annotation id.
    df_merge = df_merge[[c for c in df_merge.columns if not c.endswith('_r')]]
    df_anno_split = df_merge[df_anno.columns.to_list()]
    df_anno_split = df_anno_split.sort_values(by='id')
    return df_anno_split


def js_split(js_all_path, js_train_path, js_val_path, val_split_rate,
             val_split_num, keep_val_inTrain, image_keyname, anno_keyname):
    """Split one JSON annotation file into a train part and a val part."""
    print('Split'.center(100, '-'))
    print()
    print('json read...\n')
    with open(js_all_path, 'r') as load_f:
        data = json.load(load_f)

    df_anno = pd.DataFrame(data[anno_keyname])
    df_img = pd.DataFrame(data[image_keyname])
    df_img = df_img.rename(columns={"id": "image_id"})
    # Shuffle the images with a fixed seed so the split is reproducible.
    df_img = df_img.sample(frac=1, random_state=0)

    if val_split_num is None:
        val_split_num = int(val_split_rate * len(df_img))

    if keep_val_inTrain:
        df_img_train = df_img
        df_img_val = df_img[:val_split_num]
        df_anno_train = df_anno
        df_anno_val = get_annno(df_img_val, df_anno)
    else:
        df_img_train = df_img[val_split_num:]
        df_img_val = df_img[:val_split_num]
        df_anno_train = get_annno(df_img_train, df_anno)
        df_anno_val = get_annno(df_img_val, df_anno)
    df_img_train = df_img_train.rename(columns={"image_id": "id"}).sort_values(
        by='id')
    df_img_val = df_img_val.rename(columns={"image_id": "id"}).sort_values(
        by='id')

    # Write the train part.
    data[image_keyname] = json.loads(df_img_train.to_json(orient='records'))
    data[anno_keyname] = json.loads(df_anno_train.to_json(orient='records'))
    str_json = json.dumps(data, ensure_ascii=False)
    with open(js_train_path, 'w', encoding='utf-8') as file_obj:
        file_obj.write(str_json)

    # Write the val part.
    data[image_keyname] = json.loads(df_img_val.to_json(orient='records'))
    data[anno_keyname] = json.loads(df_anno_val.to_json(orient='records'))
    str_json = json.dumps(data, ensure_ascii=False)
    with open(js_val_path, 'w', encoding='utf-8') as file_obj:
        file_obj.write(str_json)

    print('image total %d, train %d, val %d' %
          (len(df_img), len(df_img_train), len(df_img_val)))
    print('anno total %d, train %d, val %d' %
          (len(df_anno), len(df_anno_train), len(df_anno_val)))
    return df_img
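

# Programmatic usage sketch (the paths are placeholders and it assumes
# coco_tools is importable as a package): js_split() can also be called
# directly instead of going through the CLI, for example
#
#     from coco_tools.json_Split import js_split
#     js_split('./annotations/instances_val2017.json',
#              './instances_val2017_train.json',
#              './instances_val2017_val.json',
#              val_split_rate=0.1, val_split_num=None,
#              keep_val_inTrain=False,
#              image_keyname='images', anno_keyname='annotations')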


def get_args():
    parser = argparse.ArgumentParser(description='Json Split')

    # Parameters
    parser.add_argument(
        '--json_all_path', type=str, help='path of the JSON file to split')
    parser.add_argument(
        '--json_train_path',
        type=str,
        help='output JSON path for the train part of the split')
    parser.add_argument(
        '--json_val_path',
        type=str,
        help='output JSON path for the val part of the split')
    parser.add_argument(
        '--val_split_rate',
        type=float,
        default=0.1,
        help='fraction of the images assigned to val, default 0.1; ignored when val_split_num is set'
    )
    parser.add_argument(
        '--val_split_num',
        type=int,
        default=None,
        help='number of images assigned to val, default None; when set, val_split_rate is ignored'
    )
    # Note: argparse calls bool() on the raw string, so any non-empty value
    # (including "False") is parsed as True; omit the flag to keep it False.
    parser.add_argument(
        '--keep_val_inTrain',
        type=bool,
        default=False,
        help='if true, the val part is also kept in train, i.e. json_train_path has the same content as json_all_path'
    )
    parser.add_argument(
        '--image_keyname',
        type=str,
        default='images',
        help='image key name in the JSON, default "images"')
    parser.add_argument(
        '--anno_keyname',
        type=str,
        default='annotations',
        help='annotation key name in the JSON, default "annotations"')
    parser.add_argument(
        '--Args_show',
        type=str,
        default='True',
        help='if True, print the parsed arguments (default: True)')
    args = parser.parse_args()

    if args.Args_show.lower() == 'true':
        print('Args'.center(100, '-'))
        for k, v in vars(args).items():
            print('%s = %s' % (k, v))
        print()
    return args


if __name__ == '__main__':
    args = get_args()
    js_split(args.json_all_path, args.json_train_path, args.json_val_path,
             args.val_split_rate, args.val_split_num, args.keep_val_inTrain,
             args.image_keyname, args.anno_keyname)