Submission checker version 4.0 (#1560)
* Submission checker version 4.0

* Fix Llama2-70b name

* Add 4.0 random seeds
pgmpablo157321 committed Jan 17, 2024
1 parent 007ec3c commit 8e36925
Showing 10 changed files with 307 additions and 34 deletions.
4 changes: 2 additions & 2 deletions language/llama2-70b/README.md
@@ -1,6 +1,6 @@
-# Reference Implementation for Llama-v2-70B
+# Reference Implementation for llama2-70b

-**Basic implementation for Llama-v2-70B. Few noteworthy items:**
+**Basic implementation for llama2-70b. Few noteworthy items:**

+ Processing of Validation dataset is not finalized yet. Decision on input token lengths is pending
+ Streamer for communicating with loadgen has quite some overhead. This is only meant to provide functional implementation
4 changes: 2 additions & 2 deletions language/llama2-70b/main.py
@@ -47,8 +47,8 @@ def main():
settings = lg.TestSettings()
settings.scenario = scenario_map[args.scenario.lower()]
# Need to update the conf
-settings.FromConfig(args.mlperf_conf, "llama-v2-70b", args.scenario)
-settings.FromConfig(args.user_conf, "llama-v2-70b", args.scenario)
+settings.FromConfig(args.mlperf_conf, "llama2-70b", args.scenario)
+settings.FromConfig(args.user_conf, "llama2-70b", args.scenario)

if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
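For context, a minimal sketch of how the renamed benchmark key is consumed through the LoadGen Python bindings; the config paths and the Offline scenario here are illustrative, not taken from this commit.

    import mlperf_loadgen as lg

    # Illustrative setup; "mlperf.conf" / "user.conf" paths are assumptions.
    settings = lg.TestSettings()
    settings.scenario = lg.TestScenario.Offline
    # The benchmark key must now be "llama2-70b"; entries under the old
    # "llama-v2-70b" key no longer exist after this commit.
    settings.FromConfig("mlperf.conf", "llama2-70b", "Offline")
    settings.FromConfig("user.conf", "llama2-70b", "Offline")
    settings.mode = lg.TestMode.AccuracyOnly  # or lg.TestMode.PerformanceOnly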
6 changes: 3 additions & 3 deletions language/llama2-70b/mlperf.conf
@@ -56,9 +56,9 @@ rnnt.Server.target_latency = 1000
gptj.Server.target_latency = 20000

# Falcon Server scenario requires two latency constraints
-llama-v2-70b.Server.target_latency = 2000
-llama-v2-70b.Server.ttft_latency = 2000
-llama-v2-70b.Server.tpot_latency = 200
+llama2-70b.Server.target_latency = 2000
+llama2-70b.Server.ttft_latency = 2000
+llama2-70b.Server.tpot_latency = 200

*.Offline.target_latency_percentile = 90
*.Offline.min_duration = 600000
4 changes: 2 additions & 2 deletions loadgen/CMakeLists.txt
@@ -3,8 +3,8 @@ cmake_minimum_required(VERSION 3.1)
project(mlperf_loadgen)

# The mlperf_loadgen version.
-set(mlperf_loadgen_VERSION_MAJOR 3)
-set(mlperf_loadgen_VERSION_MINOR 1)
+set(mlperf_loadgen_VERSION_MAJOR 4)
+set(mlperf_loadgen_VERSION_MINOR 0)
message("mlperf_loadgen v${mlperf_loadgen_VERSION_MAJOR}.${mlperf_loadgen_VERSION_MINOR}")

# Set build options. NB: CXX_STANDARD is supported since CMake 3.1.
4 changes: 2 additions & 2 deletions loadgen/setup.py
@@ -76,13 +76,13 @@

mlperf_loadgen_module = Pybind11Extension(
"mlperf_loadgen",
define_macros=[("MAJOR_VERSION", "3"), ("MINOR_VERSION", "1")],
define_macros=[("MAJOR_VERSION", "4"), ("MINOR_VERSION", "0")],
include_dirs=[".", get_include()],
sources=mlperf_loadgen_sources,
depends=mlperf_loadgen_headers)

setup(name="mlperf_loadgen",
version="3.1",
version="4.0",
description="MLPerf Inference LoadGen python bindings",
url="https://mlcommons.org/",
cmdclass={"build_ext": build_ext},
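If the bindings are installed from this setup.py (for example via pip), a quick way to confirm the version bump took effect is to read the installed distribution metadata; a sketch, assuming Python 3.8+ and the package name declared above.

    from importlib.metadata import version  # standard library, Python 3.8+

    # "mlperf_loadgen" is the distribution name from setup(name=...);
    # expect "4.0" after this commit.
    print(version("mlperf_loadgen"))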
2 changes: 1 addition & 1 deletion loadgen/version_generator.py
@@ -94,7 +94,7 @@ def generate_loadgen_version_definitions(cc_filename, loadgen_root):
ofile.write("// DO NOT EDIT: Autogenerated by version_generator.py.\n\n")
ofile.write("#include <string>\n\n")
ofile.write("namespace mlperf {\n\n")
ofile.write(func_def("Version", "\"3.1\""))
ofile.write(func_def("Version", "\"4.0\""))

date_time_now_local = datetime.datetime.now().isoformat()
date_time_now_utc = datetime.datetime.utcnow().isoformat()
12 changes: 6 additions & 6 deletions mlperf.conf
@@ -17,13 +17,13 @@ stable-diffusion-xl.*.performance_sample_count_override = 5000
3d-unet.*.performance_sample_count_override = 0

# Set seeds. The seeds will be distributed two weeks before the submission.
-*.*.qsl_rng_seed = 148687905518835231
-*.*.sample_index_rng_seed = 520418551913322573
-*.*.schedule_rng_seed = 811580660758947900
+*.*.qsl_rng_seed = 13281865557512327830
+*.*.sample_index_rng_seed = 198141574272810017
+*.*.schedule_rng_seed = 7575108116881280410
# Set seeds for TEST_05. The seeds will be distributed two weeks before the submission.
-*.*.test05_qsl_rng_seed = 793197339507417767
-*.*.test05_sample_index_rng_seed = 255610748586851044
-*.*.test05_schedule_rng_seed = 352213341366340113
+*.*.test05_qsl_rng_seed = 2376919268182438552
+*.*.test05_sample_index_rng_seed = 11176391829184272374
+*.*.test05_schedule_rng_seed = 3911940905271271337


*.SingleStream.target_latency_percentile = 90
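One way to sanity-check that a run actually picks up the new 4.0 seeds is to load the file through LoadGen and inspect the seed fields on TestSettings; a sketch, assuming the Python bindings and an illustrative benchmark/scenario pair.

    import mlperf_loadgen as lg

    settings = lg.TestSettings()
    # The seed entries are wildcarded (*.*), so any benchmark/scenario pair
    # present in mlperf.conf works; "llama2-70b" / "Offline" is illustrative.
    settings.FromConfig("mlperf.conf", "llama2-70b", "Offline")

    print(settings.qsl_rng_seed)           # expected: 13281865557512327830
    print(settings.sample_index_rng_seed)  # expected: 198141574272810017
    print(settings.schedule_rng_seed)      # expected: 7575108116881280410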
5 changes: 4 additions & 1 deletion text_to_image/tools/sample_ids.py
@@ -15,13 +15,16 @@ def get_args():
parser.add_argument(
"--n", type=int, default=10, help="Dataset download location"
)
+parser.add_argument(
+"--seed", "-s", type=int, default=926019364, help="Dataset download location"
+)
args = parser.parse_args()
return args


if __name__ == "__main__":
args = get_args()
-np.random.seed(42)
+np.random.seed(args.seed)
df_annotations = pd.read_csv(f"{args.tsv_path}", sep="\t")
sample_ids = list(np.random.choice(df_annotations.shape[0], args.n))
with open(args.output_path, "w+") as f:
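The regenerated ID list in sample_ids.txt below can be reproduced outside the script; a sketch of the same selection logic with the new default seed (the TSV path is an assumption about your local copy of the annotations file).

    import numpy as np
    import pandas as pd

    # Same selection logic as sample_ids.py, with the new default seed.
    np.random.seed(926019364)
    df_annotations = pd.read_csv("captions.tsv", sep="\t")  # illustrative path
    sample_ids = list(np.random.choice(df_annotations.shape[0], 10))
    print(sample_ids)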
20 changes: 10 additions & 10 deletions text_to_image/tools/sample_ids.txt
@@ -1,10 +1,10 @@
-860
-3772
-3092
-466
-4426
-3444
-3171
-2919
-130
-1685
+4459
+4015
+2705
+1682
+4048
+4683
+3757
+1578
+3319
+95
