11# Copyright (c) OpenMMLab. All rights reserved.
22import argparse
33import glob
4- import json
4+ import os
55import os .path as osp
66import shutil
77import subprocess
8+ import time
89from collections import OrderedDict
910
1011import torch
@@ -32,11 +33,15 @@ def process_checkpoint(in_file, out_file):
3233 # remove optimizer for smaller file size
3334 if 'optimizer' in checkpoint :
3435 del checkpoint ['optimizer' ]
36+ if 'ema_state_dict' in checkpoint :
37+ del checkpoint ['ema_state_dict' ]
3538
3639 # remove ema state_dict
3740 for key in list (checkpoint ['state_dict' ]):
3841 if key .startswith ('ema_' ):
3942 checkpoint ['state_dict' ].pop (key )
43+ elif key .startswith ('data_preprocessor' ):
44+ checkpoint ['state_dict' ].pop (key )
4045
4146 # if it is necessary to remove some sensitive data in checkpoint['meta'],
4247 # add the code here.
@@ -52,15 +57,15 @@ def process_checkpoint(in_file, out_file):
5257
def is_by_epoch(config):
    """Return True if the config trains with an epoch-based loop.

    Args:
        config (str): Config filename relative to ``./configs/``.

    Returns:
        bool: Whether ``train_cfg.type`` is ``'EpochBasedTrainLoop'``.
    """
    loop_type = Config.fromfile('./configs/' + config).train_cfg.type
    return loop_type == 'EpochBasedTrainLoop'
5661
5762
def get_final_epoch_or_iter(config):
    """Return the final training length declared in a config.

    Args:
        config (str): Config filename relative to ``./configs/``.

    Returns:
        int: ``train_cfg.max_epochs`` for an epoch-based loop, otherwise
        ``train_cfg.max_iters``.
    """
    train_cfg = Config.fromfile('./configs/' + config).train_cfg
    if train_cfg.type == 'EpochBasedTrainLoop':
        return train_cfg.max_epochs
    return train_cfg.max_iters
6469
6570
6671def get_best_epoch_or_iter (exp_dir ):
@@ -74,60 +79,22 @@ def get_best_epoch_or_iter(exp_dir):
7479
def get_real_epoch_or_iter(config):
    """Return the effective number of epochs or iterations for a config.

    Args:
        config (str): Config filename relative to ``./configs/``.

    Returns:
        int: ``train_cfg.max_epochs`` when training is epoch-based,
        otherwise ``train_cfg.max_iters``.
    """
    train_cfg = Config.fromfile('./configs/' + config).train_cfg
    if train_cfg.type == 'EpochBasedTrainLoop':
        epoch = train_cfg.max_epochs
        return epoch
    return train_cfg.max_iters
8487
8588
def get_final_results(log_json_path,
                      epoch_or_iter,
                      results_lut='coco/bbox_mAP',
                      by_epoch=True):
    """Extract the final metric value from a per-run scalar json log.

    Reads the last line of ``log_json_path`` (presumably an mmengine
    ``vis_data`` scalar log with one JSON dict per line -- TODO confirm
    against the logger) and returns ``{results_lut: value-as-string}``.

    Args:
        log_json_path (str): Path to the run's scalar json log file.
        epoch_or_iter (int): Kept for interface compatibility; unused --
            only the last logged line is inspected.
        results_lut (str): Metric key to report.
            Defaults to ``'coco/bbox_mAP'``.
        by_epoch (bool): Kept for interface compatibility; unused.

    Returns:
        dict | None: ``{results_lut: str}``, or ``None`` when the log
        file is empty (the caller skips runs with a ``None`` result).
    """
    # Local import: the surrounding file does not import json at module
    # level.
    import json

    result_dict = dict()
    with open(log_json_path) as f:
        lines = f.readlines()
    if not lines:
        # Empty/truncated log: report "no result" instead of raising
        # IndexError; main() already handles a None performance.
        return None
    last_line = lines[-1]
    try:
        log_entry = json.loads(last_line)
    except json.JSONDecodeError:
        log_entry = None
    if isinstance(log_entry, dict) and results_lut in log_entry:
        # Robust path: look the metric up by key, so the value is
        # correct even when results_lut is not the first field, and it
        # never carries the stray trailing '}' the naive string split
        # produced for single-metric lines.
        result_dict[results_lut] = str(log_entry[results_lut])
    else:
        # Legacy fallback for non-JSON lines: take the value of the
        # first "key: value" pair.
        result_dict[results_lut] = last_line.split(',')[0].split(
            ': ')[-1].strip()
    return result_dict
13299
133100
@@ -150,6 +117,16 @@ def get_dataset_name(config):
150117 return name_map [cfg .dataset_type ]
151118
152119
def find_last_dir(model_dir):
    """Find the most recent timestamped sub-directory of ``model_dir``.

    Sub-directory names are expected to follow ``%Y%m%d_%H%M%S`` (the
    per-run directories created under a work dir); entries that are not
    directories or do not match the pattern are skipped instead of
    crashing ``time.strptime`` with a ValueError.

    Args:
        model_dir (str): Directory containing timestamped run dirs.

    Returns:
        str: Name of the newest timestamped sub-directory.

    Raises:
        ValueError: If ``model_dir`` contains no timestamped sub-dirs
            (``max`` of an empty sequence).
    """
    dst_times = []
    for entry in os.scandir(model_dir):
        if not entry.is_dir():
            continue
        try:
            dst_time = time.mktime(
                time.strptime(entry.name, '%Y%m%d_%H%M%S'))
        except ValueError:
            # e.g. 'vis_data' or other non-run directories.
            continue
        dst_times.append((dst_time, entry.name))
    return max(dst_times, key=lambda x: x[0])[1]
128+
129+
153130def convert_model_info_to_pwc (model_infos ):
154131 pwc_files = {}
155132 for model in model_infos :
@@ -160,9 +137,7 @@ def convert_model_info_to_pwc(model_infos):
160137 pwc_model_info ['Config' ] = osp .join ('configs' , model ['config' ])
161138
162139 # get metadata
163- memory = round (model ['results' ]['memory' ] / 1024 , 1 )
164140 meta_data = OrderedDict ()
165- meta_data ['Training Memory (GB)' ] = memory
166141 if 'epochs' in model :
167142 meta_data ['Epochs' ] = get_real_epoch_or_iter (model ['config' ])
168143 else :
@@ -198,7 +173,7 @@ def convert_model_info_to_pwc(model_infos):
198173 Metrics = {'PQ' : metric }))
199174 pwc_model_info ['Results' ] = results
200175
201- link_string = 'https://download.openmmlab.com/mmdetection/v2 .0/'
176+ link_string = 'https://download.openmmlab.com/mmdetection/v3 .0/'
202177 link_string += '{}/{}' .format (model ['config' ].rstrip ('.py' ),
203178 osp .split (model ['model_path' ])[- 1 ])
204179 pwc_model_info ['Weights' ] = link_string
@@ -214,9 +189,13 @@ def parse_args():
214189 parser .add_argument (
215190 'root' ,
216191 type = str ,
192+ default = 'work_dirs' ,
217193 help = 'root path of benchmarked models to be gathered' )
218194 parser .add_argument (
219- 'out' , type = str , help = 'output path of gathered models to be stored' )
195+ '--out' ,
196+ type = str ,
197+ default = 'gather' ,
198+ help = 'output path of gathered models to be stored' )
220199 parser .add_argument (
221200 '--best' ,
222201 action = 'store_true' ,
@@ -262,32 +241,22 @@ def main():
262241 continue
263242
264243 # get the latest logs
265- log_json_path = list (
266- sorted (glob .glob (osp .join (exp_dir , '*.log.json' ))))[- 1 ]
267- log_txt_path = list (sorted (glob .glob (osp .join (exp_dir , '*.log' ))))[- 1 ]
268- cfg = Config .fromfile ('./configs/' + used_config )
269- results_lut = cfg .evaluation .metric
270- if not isinstance (results_lut , list ):
271- results_lut = [results_lut ]
272- # case when using VOC, the evaluation key is only 'mAP'
273- # when using Panoptic Dataset, the evaluation key is 'PQ'.
274- for i , key in enumerate (results_lut ):
275- if 'mAP' not in key and 'PQ' not in key :
276- results_lut [i ] = key + '_mAP'
277- model_performance = get_final_results (log_json_path ,
278- final_epoch_or_iter , results_lut ,
279- by_epoch )
244+ latest_exp_name = find_last_dir (exp_dir )
245+ latest_exp_json = osp .join (exp_dir , latest_exp_name , 'vis_data' ,
246+ latest_exp_name + '.json' )
247+
248+ model_performance = get_final_results (
249+ latest_exp_json , final_epoch_or_iter , by_epoch = by_epoch )
280250
281251 if model_performance is None :
282252 continue
283253
284- model_time = osp .split (log_txt_path )[- 1 ].split ('.' )[0 ]
285254 model_info = dict (
286255 config = used_config ,
287256 results = model_performance ,
288- model_time = model_time ,
289257 final_model = final_model ,
290- log_json_path = osp .split (log_json_path )[- 1 ])
258+ latest_exp_json = latest_exp_json ,
259+ latest_exp_name = latest_exp_name )
291260 model_info ['epochs' if by_epoch else 'iterations' ] = \
292261 final_epoch_or_iter
293262 model_infos .append (model_info )
@@ -300,7 +269,7 @@ def main():
300269
301270 model_name = osp .split (model ['config' ])[- 1 ].split ('.' )[0 ]
302271
303- model_name += '_' + model ['model_time ' ]
272+ model_name += '_' + model ['latest_exp_name ' ]
304273 publish_model_path = osp .join (model_publish_dir , model_name )
305274 trained_model_path = osp .join (models_root , model ['config' ],
306275 model ['final_model' ])
@@ -310,13 +279,8 @@ def main():
310279 publish_model_path )
311280
312281 # copy log
313- shutil .copy (
314- osp .join (models_root , model ['config' ], model ['log_json_path' ]),
315- osp .join (model_publish_dir , f'{ model_name } .log.json' ))
316- shutil .copy (
317- osp .join (models_root , model ['config' ],
318- model ['log_json_path' ].rstrip ('.json' )),
319- osp .join (model_publish_dir , f'{ model_name } .log' ))
282+ shutil .copy (model ['latest_exp_json' ],
283+ osp .join (model_publish_dir , f'{ model_name } .log.json' ))
320284
321285 # copy config to guarantee reproducibility
322286 config_path = model ['config' ]
0 commit comments