@@ -3,18 +3,23 @@
from ..loader import SubmissionLogs
from ..configuration.configuration import Config

+
class PerformanceCheck(BaseCheck):
-    def __init__(self, log, path, config: Config, submission_logs: SubmissionLogs):
+    def __init__(self, log, path, config: Config,
+                 submission_logs: SubmissionLogs):
        super().__init__(log, path)
        self.name = "performance checks"
        self.submission_logs = submission_logs
        self.mlperf_log = self.submission_logs.performance_log
        self.system_json = self.submission_logs.system_json
        self.config = config
        self.model = self.submission_logs.loader_data.get("benchmark", "")
-        self.model_mapping = self.submission_logs.loader_data.get("model_mapping", {})
-        self.model = self.config.get_mlperf_model(self.model, self.model_mapping)
-        self.scenario_fixed = self.submission_logs.loader_data.get("scenario", "")
+        self.model_mapping = self.submission_logs.loader_data.get(
+            "model_mapping", {})
+        self.model = self.config.get_mlperf_model(
+            self.model, self.model_mapping)
+        self.scenario_fixed = self.submission_logs.loader_data.get(
+            "scenario", "")
        self.scenario = self.mlperf_log["effective_scenario"]
        self.division = self.submission_logs.loader_data.get("division", "")
        self.setup_checks()
@@ -36,7 +41,7 @@ def missing_check(self):
            self.log.error("Performance log missing at %s", self.path)
            return False
        return True
-
+
    def loadgen_errors_check(self):
        if self.mlperf_log.has_error():
            if self.config.ignore_uncommited:
@@ -56,13 +61,17 @@ def loadgen_errors_check(self):
        return True

    def equal_issue_check(self):
-        if self.config.requires_equal_issue(self.model, self.division) and self.mlperf_log["effective_sample_concatenate_permutation"]:
-            self.log.error("%s requires equal issue mode (sample_concatenate_permutation), expected=true, found=false", self.path)
+        if self.config.requires_equal_issue(
+                self.model, self.division) and self.mlperf_log["effective_sample_concatenate_permutation"]:
+            self.log.error(
+                "%s requires equal issue mode (sample_concatenate_permutation), expected=true, found=false",
+                self.path)
            return False
        return True
-
+
    def performance_sample_count_check(self):
-        required_performance_sample_count = self.config.get_performance_sample_count(self.model)
+        required_performance_sample_count = self.config.get_performance_sample_count(
+            self.model)
        performance_sample_count = self.mlperf_log["effective_performance_sample_count"]
        if performance_sample_count < required_performance_sample_count:
            self.log.error(
@@ -73,7 +82,7 @@ def performance_sample_count_check(self):
            )
            return False
        return True
-
+
    def seeds_check(self):
        config_seeds = self.config.seeds
        qsl_rng_seed = self.mlperf_log["effective_qsl_rng_seed"]
@@ -105,7 +114,7 @@ def seeds_check(self):
            )
            is_valid = False
        return is_valid
-
+
    def latency_check(self):
        uses_early_stopping = self.config.uses_early_stopping(self.scenario)
        if uses_early_stopping:
@@ -165,7 +174,8 @@ def min_query_count_check(self):
        min_query_count = self.mlperf_log["effective_min_query_count"]
        samples_per_query = self.mlperf_log["effective_samples_per_query"]
        if not uses_early_stopping:
-            required_min_query_count = self.config.get_min_query_count(self.model, self.scenario)
+            required_min_query_count = self.config.get_min_query_count(
+                self.model, self.scenario)
            if required_min_query_count and min_query_count < required_min_query_count:
                self.log.error(
                    "%s Required minimum Query Count not met by user config, Expected=%s, Found=%s",
@@ -184,7 +194,7 @@ def min_query_count_check(self):
            )
            return False
        return True
-
+
    def min_duration_check(self):
        required_min_duration = TEST_DURATION_MS
        min_duration = self.mlperf_log["effective_min_duration_ms"]
@@ -197,9 +207,10 @@ def min_duration_check(self):
            )
            return False
        return True
-
+
    def network_check(self):
-        is_network_mode_sys_spec_str = self.system_json.get(SYSTEM_DESC_IS_NETWORK_MODE)
+        is_network_mode_sys_spec_str = self.system_json.get(
+            SYSTEM_DESC_IS_NETWORK_MODE)
        is_network_system = (
            is_network_mode_sys_spec_str.lower() == "true"
            if is_network_mode_sys_spec_str is not None
@@ -216,7 +227,6 @@ def network_check(self):
            )
            return False

-
        sut_name = self.mlperf_log["sut_name"]
        if is_network_system:
            # for network mode verify the SUT name is valid, according to the rules
@@ -228,7 +238,6 @@ def network_check(self):
                return False

        return True
-

    def llm_check(self):
        if self.model in self.config.get_llm_models():
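
Note: the constructor in this diff only wires up the parsed submission logs and the benchmark config and then calls setup_checks(); each *_check method logs its own failure and returns True or False. The sketch below shows one way the class could be driven. Only the constructor signature and the check-method names are taken from the diff itself; the run_performance_checks helper and the idea of calling the checks directly (rather than through whatever setup_checks() registers) are illustrative assumptions.

def run_performance_checks(log, path, config, submission_logs):
    # Hypothetical driver, not part of the commit: instantiate the class and
    # call the check methods visible in the diff. Each returns a bool and
    # logs its own error message on failure.
    check = PerformanceCheck(log, path, config, submission_logs)
    checks = [
        check.missing_check,
        check.loadgen_errors_check,
        check.equal_issue_check,
        check.performance_sample_count_check,
        check.seeds_check,
        check.latency_check,
        check.min_query_count_check,
        check.min_duration_check,
        check.network_check,
        check.llm_check,
    ]
    results = {fn.__name__: fn() for fn in checks}
    failed = [name for name, ok in results.items() if not ok]
    if failed:
        log.error("performance checks failed: %s", ", ".join(failed))
    return not failed

# Example wiring (assumed names; the real loader and config construction live
# elsewhere in the repository):
#   log = logging.getLogger("submission_checker")
#   ok = run_performance_checks(log, results_path, config, submission_logs)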