Skip to content

Commit

Permalink
Add mount options from the CLI (#279)
Browse files Browse the repository at this point in the history
New CLI argument `--mount` that is a shortcut to override mount options for `input` parameters. Works with docker and singularity runners. This is useful to ensure (in a convenient way) that input directories and files are not accidentally modified by MLCube tasks.

---------

Co-authored-by: Sergey Serebryakov <[email protected]>
  • Loading branch information
davidjurado and sergey-serebryakov authored Aug 15, 2023
1 parent bdcae7f commit 394a301
Show file tree
Hide file tree
Showing 8 changed files with 489 additions and 201 deletions.
18 changes: 17 additions & 1 deletion mlcube/mlcube/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
UsageExamples,
parse_cli_args,
)
from mlcube.config import MountType
from mlcube.errors import ExecutionError, IllegalParameterValueError, MLCubeError
from mlcube.parser import CliParser
from mlcube.shell import Shell
Expand Down Expand Up @@ -123,6 +124,14 @@ def parser_process(value: str, state: click.parser.ParsingState):
default=None,
help="CPU options defined during MLCube container execution.",
)
# Global mount option: when provided on the command line, it overrides the mount
# mode (read-only / read-write) of every input parameter of the executed task(s).
mount_option = click.option(
    "--mount",
    required=False,
    type=click.Choice([MountType.RW, MountType.RO]),
    default=None,
    help="Mount options for all input parameters. These mount options override any other mount options defined for "
    "each input parameter. A typical use case is to ensure that inputs are mounted in read-only (ro) mode.",
)


@click.group(name="mlcube", add_help_option=False)
Expand Down Expand Up @@ -279,6 +288,7 @@ def configure(mlcube: t.Optional[str], platform: str, p: t.Tuple[str]) -> None:
@gpus_option
@memory_option
@cpu_option
@mount_option
@Options.help
@click.pass_context
def run(
Expand All @@ -292,6 +302,7 @@ def run(
gpus: str,
memory: str,
cpu: str,
mount: str,
) -> None:
"""Run MLCube task(s).
Expand All @@ -307,9 +318,12 @@ def run(
gpus: GPU usage options defined during MLCube container execution.
memory: Memory RAM options defined during MLCube container execution.
cpu: CPU options defined during MLCube container execution.
mount: Mount (global) options defined for all input parameters in all tasks to be executed. They override any
mount options defined for individual parameters.
"""
logger.info(
"run input_arg mlcube=%s, platform=%s, task=%s, workspace=%s, network=%s, security=%s, gpus=%s, memory=%s, "
"run input_arg mlcube=%s, platform=%s, task=%s, workspace=%s, network=%s, security=%s, gpus=%s, "
"memory=%s, mount=%s"
"cpu=%s",
mlcube,
platform,
Expand All @@ -320,6 +334,7 @@ def run(
gpus,
memory,
cpu,
mount,
)
runner_cls, mlcube_config = parse_cli_args(
unparsed_args=ctx.args,
Expand All @@ -332,6 +347,7 @@ def run(
"gpus": gpus,
"memory": memory,
"cpu": cpu,
"mount": mount,
},
resolve=True,
)
Expand Down
2 changes: 1 addition & 1 deletion mlcube/mlcube/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ def check_parameters(parameters: DictConfig, task_cli_args: t.Dict) -> None:
)
#
for name in parameters.keys():
# The `_param_name` is anyway there, so check it's not None.
# The `name` is anyway there, so check it's not None.
[param_def] = MLCubeConfig.ensure_values_exist(parameters, name, dict)
# Deal with the case when value is a string (default value).
if isinstance(param_def, str):
Expand Down
78 changes: 63 additions & 15 deletions mlcube/mlcube/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def parse_list_arg(

@staticmethod
def parse_extra_arg(
unparsed_args: t.List[str], parsed_args: t.Dict[str, t.Optional[str]]
unparsed_args: t.List[str], parsed_args: t.Dict[str, t.Optional[str]]
) -> t.Tuple[DictConfig, t.Dict]:
"""Parse extra arguments on a command line.
Expand Down Expand Up @@ -111,31 +111,79 @@ def parse_extra_arg(
[arg[2:] for arg in unparsed_args if arg.startswith("-P")]
)

task_args = [arg.split("=") for arg in unparsed_args if not arg.startswith("-P")]
task_args = [
arg.split("=") for arg in unparsed_args if not arg.startswith("-P")
]
task_args = {arg[0]: arg[1] for arg in task_args}

# Parse unparsed arguments
platform: t.Optional[str] = parsed_args.get('platform', None)
if platform in {'docker', 'singularity'}:
platform: t.Optional[str] = parsed_args.get("platform", None)
if platform in {"docker", "singularity"}:
runner_run_args = {}
if parsed_args.get('network', None):
runner_run_args["--network"] = parsed_args['network']
if parsed_args.get('security', None):
if parsed_args.get("network", None):
runner_run_args["--network"] = parsed_args["network"]
if parsed_args.get("security", None):
key = "--security-opt" if platform == "docker" else "--security"
runner_run_args[key] = parsed_args['security']
if parsed_args.get('gpus', None):
runner_run_args[key] = parsed_args["security"]
if parsed_args.get("gpus", None):
if platform == "docker":
runner_run_args["--gpus"] = parsed_args['gpus']
runner_run_args["--gpus"] = parsed_args["gpus"]
else:
runner_run_args["--nv"] = ""
os.environ['SINGULARITYENV_CUDA_VISIBLE_DEVICES'] = parsed_args['gpus']
if parsed_args.get('memory', None):
os.environ["SINGULARITYENV_CUDA_VISIBLE_DEVICES"] = parsed_args[
"gpus"
]
if parsed_args.get("memory", None):
key = "--memory" if platform == "docker" else "--vm-ram"
runner_run_args[key] = parsed_args['memory']
if parsed_args.get('cpu', None):
runner_run_args[key] = parsed_args["memory"]
if parsed_args.get("cpu", None):
key = "--cpuset-cpus" if platform == "docker" else "--vm-cpu"
runner_run_args[key] = parsed_args['cpu']
runner_run_args[key] = parsed_args["cpu"]
runner_run_args["--mount_opts"] = parsed_args["mount"]

mlcube_args.merge_with({platform: runner_run_args})

return mlcube_args, task_args

@staticmethod
def parse_optional_arg(
    platform: t.Optional[str],
    network_option: t.Optional[str],
    security_option: t.Optional[str],
    gpus_option: t.Optional[str],
    memory_option: t.Optional[str],
    cpu_option: t.Optional[str],
    mount_option: t.Optional[str],
) -> t.Tuple[DictConfig, t.Dict]:
    """Translate optional CLI arguments into runner-specific run arguments.

    Args:
        platform: Platform to use to run this MLCube (docker, singularity, gcp, k8s etc.).
        network_option: Networking options defined during MLCube container execution.
        security_option: Security options defined during MLCube container execution.
        gpus_option: GPU usage options defined during MLCube container execution.
        memory_option: Memory RAM options defined during MLCube container execution.
        cpu_option: CPU options defined during MLCube container execution.
        mount_option: Mount options for input parameters (e.g., read-only / read-write).

    Returns:
        Tuple of (MLCube arguments keyed by platform, task arguments — always empty here).
    """
    mlcube_args, opts = {}, {}

    # Only add options that were actually provided on the command line, so that an
    # absent option (None) does not override values configured elsewhere.
    if mount_option is not None:
        opts["--mount_opts"] = mount_option

    if network_option is not None:
        opts["--network"] = network_option

    if security_option is not None:
        # Docker and singularity use different flag names for security options.
        key = "--security-opt" if platform == "docker" else "--security"
        opts[key] = security_option

    if gpus_option is not None:
        # NOTE(review): for singularity, `parse_extra_arg` sets `--nv` to an empty
        # string and exports SINGULARITYENV_CUDA_VISIBLE_DEVICES instead of passing
        # the value here — confirm which behavior is intended.
        key = "--gpus" if platform == "docker" else "--nv"
        opts[key] = gpus_option

    if memory_option is not None:
        key = "--memory" if platform == "docker" else "--vm-ram"
        opts[key] = memory_option

    if cpu_option is not None:
        # NOTE(review): `parse_extra_arg` uses `--cpuset-cpus` for docker, while this
        # method uses `--cpu-shares` — confirm whether this divergence is intended.
        key = "--cpu-shares" if platform == "docker" else "--vm-cpu"
        opts[key] = cpu_option

    mlcube_args[platform] = opts
    return mlcube_args, {}
Loading

0 comments on commit 394a301

Please sign in to comment.