
Commit 8053b06

[Automated Commit] Format Codebase
1 parent ae9cfbd commit 8053b06

7 files changed: +131 −64 lines changed
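Note: the changes below are mechanical style fixes of the kind a PEP 8
autoformatter produces: long lines rewrapped to 79 characters, a second blank
line added before top-level class definitions, trailing whitespace stripped,
and missing end-of-file newlines added. As a hedged sketch (the commit does
not name the tool or flags the automation used), the same class of fixes
could be reproduced with autopep8's public Python API:

    # Sketch only: the formatter and options behind this automated commit
    # are an assumption; autopep8.fix_code is autopep8's public entry point.
    import autopep8

    path = "tools/submission/submission_checker/checks/base.py"
    with open(path) as f:
        source = f.read()

    fixed = autopep8.fix_code(
        source, options={"aggressive": 1, "max_line_length": 79})

    with open(path, "w") as f:
        f.write(fixed)  # rewrapped lines, normalized blank lines, EOF newline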

tools/submission/submission_checker/checks/accuracy_check.py

Lines changed: 13 additions & 8 deletions
@@ -5,8 +5,10 @@
 import re
 import os
 
+
 class AccuracyCheck(BaseCheck):
-    def __init__(self, log, path, config: Config, submission_logs: SubmissionLogs):
+    def __init__(self, log, path, config: Config,
+                 submission_logs: SubmissionLogs):
         super().__init__(log, path)
         self.name = "accuracy checks"
         self.submission_logs = submission_logs
@@ -15,9 +17,12 @@ def __init__(self, log, path, config: Config, submission_logs: SubmissionLogs):
         self.accuracy_json = self.submission_logs.accuracy_json
         self.config = config
         self.model = self.submission_logs.loader_data.get("benchmark", "")
-        self.model_mapping = self.submission_logs.loader_data.get("model_mapping", {})
-        self.model = self.config.get_mlperf_model(self.model, self.model_mapping)
-        self.scenario_fixed = self.submission_logs.loader_data.get("scenario", "")
+        self.model_mapping = self.submission_logs.loader_data.get(
+            "model_mapping", {})
+        self.model = self.config.get_mlperf_model(
+            self.model, self.model_mapping)
+        self.scenario_fixed = self.submission_logs.loader_data.get(
+            "scenario", "")
         self.scenario = self.mlperf_log["effective_scenario"]
         self.division = self.submission_logs.loader_data.get("division", "")
         self.setup_checks()
@@ -97,7 +102,7 @@ def accuracy_result_check(self):
             is_valid = False
 
         return is_valid
-    
+
     def accuracy_json_check(self):
         if not os.path.exists(self.accuracy_json):
             self.log.error("%s is missing", self.accuracy_json)
@@ -107,7 +112,7 @@ def accuracy_json_check(self):
             self.log.error("%s is not truncated", self.accuracy_json)
             return False
         return True
-    
+
     def loadgen_errors_check(self):
         if self.mlperf_log.has_error():
             if self.config.ignore_uncommited:
@@ -125,7 +130,7 @@ def loadgen_errors_check(self):
             )
             return False
         return True
-    
+
     def dataset_check(self):
         qsl_total_count = self.mlperf_log["qsl_reported_total_count"]
         expected_qsl_total_count = self.config.get_dataset_size(self.model)
@@ -134,4 +139,4 @@ def dataset_check(self):
                 "%s accurcy run does not cover all dataset, accuracy samples: %s, dataset size: %s", self.path, qsl_total_count, expected_qsl_total_count
             )
             return False
-        return True
\ No newline at end of file
+        return True
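The __init__ hunks above resolve the submitter's benchmark name to a
canonical MLPerf model name before any check runs. A hedged illustration of
that flow (the dictionary contents below are hypothetical, not taken from a
real submission):

    # Hypothetical loader_data; real values come from SubmissionLogs.
    loader_data = {
        "benchmark": "llama2-70b",
        "model_mapping": {"llama2-70b": "llama2-70b-99"},
        "scenario": "Offline",
    }

    model = loader_data.get("benchmark", "")
    model_mapping = loader_data.get("model_mapping", {})
    # config.get_mlperf_model(model, model_mapping) would then map the raw
    # benchmark name onto the canonical model key used by the config tables.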

tools/submission/submission_checker/checks/base.py

Lines changed: 8 additions & 4 deletions
@@ -1,5 +1,6 @@
 from abc import ABC, abstractmethod
 
+
 class BaseCheck(ABC):
     """
     A generic check class meant to be inherited by concrete check implementations.
@@ -27,16 +28,19 @@ def run_checks(self):
             if not valid:
                 return False
         return valid
-    
+
     def execute(self, check):
         return check()
-    
+
     def __call__(self):
         """Allows the check instance to be called like a function."""
         self.log.info("Starting %s for: %s", self.name, self.path)
         valid = self.run_checks()
         if valid:
             self.log.info("All %s checks passed for: %s", self.name, self.path)
         else:
-            self.log.error("Some %s Checks failed for: %s", self.name, self.path)
-        return valid
\ No newline at end of file
+            self.log.error(
+                "Some %s Checks failed for: %s",
+                self.name,
+                self.path)
+        return valid
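The __call__ method rewrapped above is what lets a check object be invoked
like a function. A minimal sketch of a concrete subclass, assuming that
run_checks iterates a self.checks list the subclass populates (consistent
with the execute/run_checks pattern in this file) and that no other abstract
hooks need overriding; SmokeCheck itself is hypothetical:

    import logging

    class SmokeCheck(BaseCheck):
        """Illustrative check; not part of the submission checker."""

        def __init__(self, log, path):
            super().__init__(log, path)
            self.name = "smoke checks"
            self.checks = [self.path_is_nonempty_check]  # read by run_checks

        def path_is_nonempty_check(self):
            return bool(self.path)

    log = logging.getLogger("submission_checker")
    valid = SmokeCheck(log, "closed/OrgName/results")()  # runs via __call__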

tools/submission/submission_checker/checks/performance_check.py

Lines changed: 26 additions & 17 deletions
@@ -3,18 +3,23 @@
 from ..loader import SubmissionLogs
 from ..configuration.configuration import Config
 
+
 class PerformanceCheck(BaseCheck):
-    def __init__(self, log, path, config: Config, submission_logs: SubmissionLogs):
+    def __init__(self, log, path, config: Config,
+                 submission_logs: SubmissionLogs):
         super().__init__(log, path)
         self.name = "performance checks"
         self.submission_logs = submission_logs
         self.mlperf_log = self.submission_logs.performance_log
         self.system_json = self.submission_logs.system_json
         self.config = config
         self.model = self.submission_logs.loader_data.get("benchmark", "")
-        self.model_mapping = self.submission_logs.loader_data.get("model_mapping", {})
-        self.model = self.config.get_mlperf_model(self.model, self.model_mapping)
-        self.scenario_fixed = self.submission_logs.loader_data.get("scenario", "")
+        self.model_mapping = self.submission_logs.loader_data.get(
+            "model_mapping", {})
+        self.model = self.config.get_mlperf_model(
+            self.model, self.model_mapping)
+        self.scenario_fixed = self.submission_logs.loader_data.get(
+            "scenario", "")
         self.scenario = self.mlperf_log["effective_scenario"]
         self.division = self.submission_logs.loader_data.get("division", "")
         self.setup_checks()
@@ -36,7 +41,7 @@ def missing_check(self):
             self.log.error("Performance log missing at %s", self.path)
             return False
         return True
-    
+
     def loadgen_errors_check(self):
         if self.mlperf_log.has_error():
             if self.config.ignore_uncommited:
@@ -56,13 +61,17 @@ def loadgen_errors_check(self):
         return True
 
     def equal_issue_check(self):
-        if self.config.requires_equal_issue(self.model, self.division) and self.mlperf_log["effective_sample_concatenate_permutation"]:
-            self.log.error("%s requires equal issue mode (sample_concatenate_permutation), expected=true, found=false", self.path)
+        if self.config.requires_equal_issue(
+                self.model, self.division) and self.mlperf_log["effective_sample_concatenate_permutation"]:
+            self.log.error(
+                "%s requires equal issue mode (sample_concatenate_permutation), expected=true, found=false",
+                self.path)
             return False
         return True
-    
+
     def performance_sample_count_check(self):
-        required_performance_sample_count = self.config.get_performance_sample_count(self.model)
+        required_performance_sample_count = self.config.get_performance_sample_count(
+            self.model)
         performance_sample_count = self.mlperf_log["effective_performance_sample_count"]
         if performance_sample_count < required_performance_sample_count:
             self.log.error(
@@ -73,7 +82,7 @@ def performance_sample_count_check(self):
             )
             return False
         return True
-    
+
     def seeds_check(self):
         config_seeds = self.config.seeds
         qsl_rng_seed = self.mlperf_log["effective_qsl_rng_seed"]
@@ -105,7 +114,7 @@ def seeds_check(self):
                 )
                 is_valid = False
         return is_valid
-    
+
     def latency_check(self):
         uses_early_stopping = self.config.uses_early_stopping(self.scenario)
         if uses_early_stopping:
@@ -165,7 +174,8 @@ def min_query_count_check(self):
         min_query_count = self.mlperf_log["effective_min_query_count"]
         samples_per_query = self.mlperf_log["effective_samples_per_query"]
         if not uses_early_stopping:
-            required_min_query_count = self.config.get_min_query_count(self.model, self.scenario)
+            required_min_query_count = self.config.get_min_query_count(
+                self.model, self.scenario)
             if required_min_query_count and min_query_count < required_min_query_count:
                 self.log.error(
                     "%s Required minimum Query Count not met by user config, Expected=%s, Found=%s",
@@ -184,7 +194,7 @@ def min_query_count_check(self):
                 )
                 return False
         return True
-    
+
     def min_duration_check(self):
         required_min_duration = TEST_DURATION_MS
         min_duration = self.mlperf_log["effective_min_duration_ms"]
@@ -197,9 +207,10 @@ def min_duration_check(self):
             )
             return False
         return True
-    
+
     def network_check(self):
-        is_network_mode_sys_spec_str = self.system_json.get(SYSTEM_DESC_IS_NETWORK_MODE)
+        is_network_mode_sys_spec_str = self.system_json.get(
+            SYSTEM_DESC_IS_NETWORK_MODE)
         is_network_system = (
             is_network_mode_sys_spec_str.lower() == "true"
             if is_network_mode_sys_spec_str is not None
@@ -216,7 +227,6 @@ def network_check(self):
             )
             return False
 
-
         sut_name = self.mlperf_log["sut_name"]
         if is_network_system:
             # for network mode verify the SUT name is valid, according to the rules
@@ -228,7 +238,6 @@ def network_check(self):
                 return False
 
         return True
-
 
     def llm_check(self):
         if self.model in self.config.get_llm_models():

tools/submission/submission_checker/configuration/configuration.py

Lines changed: 5 additions & 6 deletions
@@ -17,10 +17,9 @@ def __init__(
         self.ignore_uncommited = ignore_uncommited
         self.skip_power_check = skip_power_check
         self.load_config(version)
-
 
     def load_config(self, version):
-        # TODO: Load values from 
+        # TODO: Load values from
         self.models = self.base["models"]
         self.seeds = self.base["seeds"]
         if self.base.get("test05_seeds"):
@@ -100,7 +99,7 @@ def get_accuracy_target(self, model):
 
     def get_accuracy_upper_limit(self, model):
         return self.accuracy_upper_limit.get(model, None)
-    
+
     def get_accuracy_values(self, model):
         patterns = []
         acc_targets = []
@@ -147,7 +146,7 @@ def get_min_query_count(self, model, scenario):
         if model not in self.min_queries:
             raise ValueError("model not known: " + model)
         return self.min_queries[model].get(scenario)
-    
+
     def get_dataset_size(self, model):
         model = self.get_mlperf_model(model)
         if model not in self.dataset_size:
@@ -187,7 +186,7 @@ def requires_equal_issue(self, model, division):
             ]
             and self.version in ["v4.1"]
         )
-    
+
     def get_llm_models(self):
         return [
             "llama2-70b-99",
@@ -197,6 +196,6 @@ def get_llm_models(self):
             "mixtral-8x7b",
             "llama3.1-405b",
             "llama3.1-8b",
-            "llama3.1-8b-edge", 
+            "llama3.1-8b-edge",
             "deepseek-r1"
         ]

tools/submission/submission_checker/constants.py

Lines changed: 1 addition & 1 deletion
@@ -1072,4 +1072,4 @@
     "v5.1": "{division}/{submitter}/systems/{system}.json",
     "v6.0": "{division}/{submitter}/systems/{system}.json",
     "default": "{division}/{submitter}/systems/{system}.json",
-}
\ No newline at end of file
+}
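The brace being fixed here closes a table that maps checker versions to
system-description path templates. As a hedged usage sketch (the enclosing
dictionary's real name is not visible in this hunk, so SYSTEM_DESC_PATHS is
a placeholder), an entry is consumed with str.format:

    # Placeholder name for the dict whose closing brace gains its newline.
    SYSTEM_DESC_PATHS = {
        "default": "{division}/{submitter}/systems/{system}.json",
    }

    path = SYSTEM_DESC_PATHS["default"].format(
        division="closed", submitter="OrgName", system="system-1")
    # -> "closed/OrgName/systems/system-1.json"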
