Commit 5aeb452

A test configuration for the environment based on local shell scripts (#878)

# Pull Request

## Title

A test configuration for the environment based on local shell scripts

---

## Description

This is a sample MLOS configuration (CLI config, tunables, environment,
and the scripts) that launches a local environment and invokes local
shell scripts to set it up and run trials.

---

## Type of Change

This is primarily a config-based update, along with unit tests that
exercise the new setup.

- ✨ New feature
- 📝 Documentation update
- 🧪 Tests

---

## Testing

Unit tests that validate the environment and run test experiments are
included in this PR.
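
One way to run just the updated tests locally (assuming a working `mlos_bench` development environment) is to point pytest at the modified test module:
```bash
# Hypothetical invocation; any standard pytest selection of the
# updated launcher tests works equally well.
pytest mlos_bench/mlos_bench/tests/launcher_in_process_test.py
```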

To test the environment manually, run:
```bash
mlos_bench \
     --config mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-bench.jsonc \
     --globals experiment_test_local.jsonc \
     --tunable_values tunable-values/tunable-values-local.jsonc
```

---

## Additional Notes (optional)

This setup is intended to serve as an example for building other local
environments driven by shell scripts.

---
motus authored Nov 1, 2024
1 parent 4a998b3 commit 5aeb452
Showing 9 changed files with 348 additions and 0 deletions.
37 changes: 37 additions & 0 deletions mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-bench.jsonc
@@ -0,0 +1,37 @@
//
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
//
// A test config to launch a local shell environment with some tunables.
//
// Run:
// mlos_bench \
// --config mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-bench.jsonc \
// --globals experiment_test_local.jsonc \
// --tunable_values tunable-values/tunable-values-local.jsonc
{
    "config_path": [
        "mlos_bench/mlos_bench/config",
        "mlos_bench/mlos_bench/tests/config/experiments",
        "mlos_bench/mlos_bench/tests/config"
    ],

    // Include some sensitive parameters that should not be checked in (`script_password`).
    // Alternatively, one can specify this file through the --globals CLI option.
    // "globals": [
    //     "test_local_private_params.jsonc"
    // ],

    "environment": "environments/local/test_local_env.jsonc",

    // If the optimizer is not specified, run a single benchmark trial.
    // "optimizer": "optimizers/mlos_core_default_opt.jsonc",

    // If storage is not specified, just print the results to the log.
    // "storage": "storage/sqlite.jsonc",

    "teardown": false,

    "log_file": "test-local-bench.log",
    "log_level": "DEBUG" // "INFO" for less verbosity
}
37 changes: 37 additions & 0 deletions mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-opt.jsonc
@@ -0,0 +1,37 @@
//
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
//
// A test config to launch a local shell environment with some tunables.
//
// Run:
// mlos_bench \
// --config mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-opt.jsonc \
// --globals experiment_test_local.jsonc \
// --max_suggestions 10
{
    "config_path": [
        "mlos_bench/mlos_bench/config",
        "mlos_bench/mlos_bench/tests/config/experiments",
        "mlos_bench/mlos_bench/tests/config"
    ],

    // Include some sensitive parameters that should not be checked in (`script_password`).
    // Alternatively, one can specify this file through the --globals CLI option.
    // "globals": [
    //     "test_local_private_params.jsonc"
    // ],

    "environment": "environments/local/test_local_env.jsonc",

    // If the optimizer is not specified, run a single benchmark trial.
    "optimizer": "optimizers/mlos_core_default_opt.jsonc",

    // If storage is not specified, just print the results to the log.
    // "storage": "storage/sqlite.jsonc",

    "teardown": false,

    "log_file": "test-local-bench.log",
    "log_level": "DEBUG" // "INFO" for less verbosity
}
63 changes: 63 additions & 0 deletions mlos_bench/mlos_bench/tests/config/environments/local/scripts/bench_run.py
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper script to run the benchmark and store the results and telemetry in CSV files.
This is a sample script that demonstrates how to produce the benchmark results
and telemetry in the format that MLOS expects.
THIS IS A TOY EXAMPLE. The script does not run any actual benchmarks and produces fake
data for demonstration purposes. Please copy and extend it to suit your needs.
Run:
./bench_run.py ./output-metrics.csv ./output-telemetry.csv`
"""

import argparse
from datetime import datetime, timedelta

import pandas


def _main(output_metrics: str, output_telemetry: str) -> None:

# Some fake const data that we can check in the unit tests.
# Our unit tests expect the `score` metric to be present in the output.
df_metrics = pandas.DataFrame(
[
{"metric": "score", "value": 123.4}, # A copy of `total_time`
{"metric": "total_time", "value": 123.4},
{"metric": "latency", "value": 9.876},
{"metric": "throughput", "value": 1234567},
]
)
df_metrics.to_csv(output_metrics, index=False)

# Timestamps are const so we can check them in the tests.
timestamp = datetime(2024, 10, 25, 13, 45)
ts_delta = timedelta(seconds=30)

df_telemetry = pandas.DataFrame(
[
{"timestamp": timestamp, "metric": "cpu_load", "value": 0.1},
{"timestamp": timestamp, "metric": "mem_usage", "value": 20.0},
{"timestamp": timestamp + ts_delta, "metric": "cpu_load", "value": 0.6},
{"timestamp": timestamp + ts_delta, "metric": "mem_usage", "value": 33.0},
{"timestamp": timestamp + 2 * ts_delta, "metric": "cpu_load", "value": 0.5},
{"timestamp": timestamp + 2 * ts_delta, "metric": "mem_usage", "value": 31.0},
]
)
df_telemetry.to_csv(output_telemetry, index=False)


if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run the benchmark and save the results in CSV files."
)
parser.add_argument("output_metrics", help="CSV file to save the benchmark results to.")
parser.add_argument("output_telemetry", help="CSV file for telemetry data.")
args = parser.parse_args()
_main(args.output_metrics, args.output_telemetry)
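
For reference, the CSV layout the toy script produces follows directly from the DataFrames above:
```bash
./bench_run.py output-metrics.csv output-telemetry.csv
head -3 output-metrics.csv
# metric,value
# score,123.4
# total_time,123.4
```
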
62 changes: 62 additions & 0 deletions mlos_bench/mlos_bench/tests/config/environments/local/scripts/bench_setup.py
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Helper script to update the environment parameters from JSON.
This is a sample script that demonstrates how to read the tunable parameters
and metadata from JSON and produce some kind of a configuration file for the
application that is being benchmarked or optimized.
THIS IS A TOY EXAMPLE. The script does not have any actual effect on the system.
Please copy and extend it to suit your needs.
Run:
`./bench_setup.py ./input-params.json ./input-params-meta.json`
"""

import argparse
import json
import os


def _main(fname_input: str, fname_meta: str, fname_output: str) -> None:

# In addition to the input JSON files,
# MLOS can pass parameters through the OS environment:
print(f'# RUN: {os.environ["experiment_id"]}:{os.environ["trial_id"]}')

# Key-value pairs of tunable parameters, e.g.,
# {"shared_buffers": "128", ...}
with open(fname_input, "rt", encoding="utf-8") as fh_tunables:
tunables_data = json.load(fh_tunables)

# Optional free-format metadata for tunable parameters, e.g.
# {"shared_buffers": {"suffix": "MB"}, ...}
with open(fname_meta, "rt", encoding="utf-8") as fh_meta:
tunables_meta = json.load(fh_meta)

# Pretend that we are generating a PG config file with lines like:
# shared_buffers = 128MB
with open(fname_output, "wt", encoding="utf-8", newline="") as fh_config:
for key, val in tunables_data.items():
meta = tunables_meta.get(key, {})
suffix = meta.get("suffix", "")
line = f"{key} = {val}{suffix}"
fh_config.write(line + "\n")
print(line)


if __name__ == "__main__":

parser = argparse.ArgumentParser(description="Update the environment parameters from JSON.")

parser.add_argument("input", help="JSON file with tunable parameters.")
parser.add_argument("meta", help="JSON file with tunable parameters metadata.")
parser.add_argument("output", help="Output config file.")

args = parser.parse_args()

_main(args.input, args.meta, args.output)
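
As a quick sanity check, the script can be exercised by hand with inputs that match the tunables defined in this PR (the inline JSON and file names below are just an illustration; `experiment_id` and `trial_id` must be set because the script reads them from the OS environment):
```bash
echo '{"shared_buffers": 256}' > input-params.json
echo '{"shared_buffers": {"suffix": "MB"}}' > input-params-meta.json
experiment_id=TEST-LOCAL-001 trial_id=1 \
    ./bench_setup.py input-params.json input-params-meta.json 99_bench.conf
# Prints:
# # RUN: TEST-LOCAL-001:1
# shared_buffers = 256MB
```
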
13 changes: 13 additions & 0 deletions mlos_bench/mlos_bench/tests/config/environments/local/test_local-tunables.jsonc
@@ -0,0 +1,13 @@
{
    "test_local_tunable_group": {
        "cost": 1,
        "params": {
            "shared_buffers": {
                "type": "int",
                "default": 128,
                "range": [1, 1024],
                "meta": {"suffix": "MB"}
            }
        }
    }
}
82 changes: 82 additions & 0 deletions mlos_bench/mlos_bench/tests/config/environments/local/test_local_env.jsonc
@@ -0,0 +1,82 @@
// Test config for test_local_env.py
{
    "class": "mlos_bench.environments.local.LocalEnv",
    "name": "Local Shell Test Environment",

    "include_services": [
        // from the built-in configs
        "services/local/local_exec_service.jsonc"
    ],

    // Include the definitions of the tunable parameters to use
    // in this environment and its children, if there are any:
    "include_tunables": [
        "environments/local/test_local-tunables.jsonc"
    ],

    "config": {

        // GROUPS of tunable parameters to use in this environment:
        "tunable_params": [
            "test_local_tunable_group"
        ],

        "const_args": {
            // The actual value should be provided by the user externally
            // (e.g., through the --globals file).
            // This is just a placeholder to make unit tests work.
            "script_password": "PLACEHOLDER"
        },

        // Other non-tunable parameters to use in this environment:
        "required_args": [
            "experiment_id",   // Specified by the user in `experiment_test_local.jsonc`
            "trial_id",        // Provided by MLOS/storage
            "script_password"  // Should be provided by the user (e.g., through --globals).
        ],

        // Pass these parameters to the shell scripts as env variables
        // (can be the names of the tunables, too):
        "shell_env_params": [
            "experiment_id",
            "trial_id",
            "script_password",
            "shared_buffers"  // This tunable parameter will appear as an env var (and in JSON)
        ],

        // MLOS will dump key-value pairs of tunable parameters
        // into this file in the temp directory:
        "dump_params_file": "input-params.json",

        // [Optionally] MLOS can dump the metadata of the tunable parameters here:
        "dump_meta_file": "input-params-meta.json",

        // MLOS will create a temp directory, store the parameters and metadata
        // into it, and run the setup script from there:
        "setup": [
            "echo Set up $experiment_id:$trial_id :: shared_buffers = $shared_buffers",
            "environments/local/scripts/bench_setup.py input-params.json input-params-meta.json 99_bench.conf"
        ],

        // Run the benchmark script from the temp directory.
        "run": [
            "echo Run $experiment_id:$trial_id",
            "environments/local/scripts/bench_run.py output-metrics.csv output-telemetry.csv"
        ],

        // [Optionally] MLOS can run the teardown script from the temp directory.
        // We don't need it here, because MLOS automatically cleans up
        // the temp directory after each trial.
        "teardown": [
            "echo Tear down $experiment_id:$trial_id"
        ],

        // [Optionally] MLOS can read telemetry data produced by the
        // `bench_run.py` script:
        "read_telemetry_file": "output-telemetry.csv",

        // MLOS will read the results of the benchmark from this file
        // (created by the "run" script):
        "read_results_file": "output-metrics.csv"
    }
}
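
Put together, each trial roughly amounts to the shell session below. This is a simplified sketch, not MLOS's literal commands: in reality MLOS creates a fresh temp directory per trial, resolves the script paths via `config_path`, and substitutes the current tunable values instead of the defaults shown here.
```bash
# Sketch of one trial with the default tunable value (shared_buffers = 128).
export experiment_id="TEST-LOCAL-001" trial_id=1 script_password="PLACEHOLDER" shared_buffers=128
echo '{"shared_buffers": 128}' > input-params.json                    # "dump_params_file"
echo '{"shared_buffers": {"suffix": "MB"}}' > input-params-meta.json  # "dump_meta_file"
# "setup" phase:
echo Set up $experiment_id:$trial_id :: shared_buffers = $shared_buffers
./bench_setup.py input-params.json input-params-meta.json 99_bench.conf
# "run" phase; MLOS then reads output-metrics.csv and output-telemetry.csv:
echo Run $experiment_id:$trial_id
./bench_run.py output-metrics.csv output-telemetry.csv
```
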
24 changes: 24 additions & 0 deletions mlos_bench/mlos_bench/tests/config/experiments/experiment_test_local.jsonc
@@ -0,0 +1,24 @@
// Global parameters for the experiment.
{
    "experiment_id": "TEST-LOCAL-001",

    // Add your parameters here. Remember to declare them in the
    // experiments' configs in "const_args" and/or "required_args" or
    // in the Service or Optimizer "config" section.

    // This parameter gets propagated into the optimizer config.
    // By default, MLOS expects the benchmark to produce a single
    // scalar, "score".
    "optimization_targets": {
        "score": "min",  // Same as `total_time`; we need it for unit tests.
        "total_time": "min",
        "throughput": "max"
    },

    // Another parameter that gets propagated into the optimizer config.
    // Each such parameter can be overridden by the corresponding CLI option, e.g.,
    // `--max_suggestions 20`.
    // Number of configurations to be suggested by the optimizer,
    // if optimization is enabled.
    "max_suggestions": 10
}
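
Since these globals propagate into the optimizer config, each of them can be overridden on the command line, e.g. (reusing the `-opt` CLI config from this PR):
```bash
mlos_bench \
    --config mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-opt.jsonc \
    --globals experiment_test_local.jsonc \
    --max_suggestions 20
```
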
8 changes: 8 additions & 0 deletions mlos_bench/mlos_bench/tests/config/tunable-values/tunable-values-local.jsonc
@@ -0,0 +1,8 @@
// A simple key-value assignment of a tunables instance.
{
    "$schema": "https://raw.githubusercontent.com/microsoft/MLOS/main/mlos_bench/mlos_bench/config/schemas/tunables/tunable-values-schema.json",

    // Values that are different from the defaults
    // in order to test --tunable_values handling in OneShotOptimizer.
    "shared_buffers": 256
}
22 changes: 22 additions & 0 deletions mlos_bench/mlos_bench/tests/launcher_in_process_test.py
@@ -38,6 +38,28 @@
            ],
            64.53897,
        ),
        (
            [
                "--config",
                "mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-bench.jsonc",
                "--globals",
                "experiment_test_local.jsonc",
                "--tunable_values",
                "tunable-values/tunable-values-local.jsonc",
            ],
            123.4,
        ),
        (
            [
                "--config",
                "mlos_bench/mlos_bench/tests/config/cli/test-cli-local-env-opt.jsonc",
                "--globals",
                "experiment_test_local.jsonc",
                "--max-suggestions",
                "3",
            ],
            123.4,
        ),
    ],
)
def test_main_bench(argv: List[str], expected_score: float) -> None:
