diff --git a/apps.awesim.org/apps/bc_desktop/cardinal.yml.erb b/apps.awesim.org/apps/bc_desktop/cardinal.yml.erb
index 36c733c..0dbe222 100644
--- a/apps.awesim.org/apps/bc_desktop/cardinal.yml.erb
+++ b/apps.awesim.org/apps/bc_desktop/cardinal.yml.erb
@@ -68,82 +68,37 @@ attributes:
     widget: select
     label: "Node type"
     help: |
-      - **Standard Compute**<br>
-        These are standard HPC machines. There are 224 with 40 cores and
-        340 with 48. They all have 192 GB of RAM. Chosing any will decrease
-        your wait time.
-      - **GPU Enabled**<br>
-        These are HPC machines with [NVIDIA Tesla V100 GPUs]. They have the same
-        40 core machines have 2 GPUs with 16 GB of RAM and 48 core machines have 2
-        with 32 GB of RAM. Densegpu types have 4 GPUs with 16 GB of RAM.
-        Visualization nodes are GPU enabled nodes with an X Server in the background
-        for 3D visualization using VirtualGL.
-      - **Large Memory**<br>
-        These are HPC machines with very large amounts of memory. Largmem nodes
-        have 48 cores with 768 GB of RAM. Hugemem nodes have 80 cores with 3 TB of RAM.
-
-      Visit the OSC site for more [detailed information on the Pitzer cluster].
-      [detailed information on the Pitzer cluster]: https://www.osc.edu/resources/technical_support/supercomputers/pitzer
-      [NVIDIA Tesla V100 GPUs]: https://www.nvidia.com/en-us/data-center/v100/
+      - **any** - (*28 cores*) Chooses any one of the available Cardinal nodes.
+        This reduces the wait time because the job has no special requirements.
+      - **vis** - (*28 cores*) This node includes an NVIDIA Tesla P100 GPU with
+        an X server running in the background. This allows for hardware
+        rendering with the GPU, typically needed for 3D visualization using
+        VirtualGL. There are currently only 10 of these nodes on Owens.
+      - **gpu** - (*28 cores*) This node includes an NVIDIA Tesla P100 GPU
+        allowing for CUDA computations. There are currently only 160 of these
+        nodes on Cardinal. These nodes don't start an X server, so visualization
+        with hardware rendering is not possible.
+      - **hugemem** - (*48 cores*) This Owens node has 1.5 TB of available RAM
+        as well as 48 cores. There are 16 of these nodes on Owens.
     options:
       - [
           "any", "any",
           data-min-cores: 1,
-          data-max-cores: 80,
-          data-set-gpus: 0,
+          data-max-cores: 28,
         ]
       - [
-          "40 core", "any-40core",
+          "vis", "vis",
           data-min-cores: 1,
-          data-max-cores: 40,
-          data-set-gpus: 0,
+          data-max-cores: 28,
         ]
       - [
-          "48 core", "any-48core",
+          "gpu", "gpu-any",
           data-min-cores: 1,
-          data-max-cores: 48,
-          data-set-gpus: 0,
-        ]
-      - [
-          "any gpu", "gpu-any",
-          data-min-cores: 1,
-          data-max-cores: 48,
-          data-set-gpus: 1,
-        ]
-      - [
-          "40 core with gpu", "gpu-40core",
-          data-min-cores: 1,
-          data-max-cores: 40,
-          data-set-gpus: 1,
-        ]
-      - [
-          "48 core with gpu", "gpu-48core",
-          data-min-cores: 1,
-          data-max-cores: 48,
-          data-set-gpus: 1,
-        ]
-      - [
-          "densegpu", "densegpu",
-          data-min-cores: 1,
-          data-max-cores: 48,
-          data-set-gpus: 4,
-        ]
-      - [
-          "visualization node", "vis",
-          data-min-cores: 1,
-          data-max-cores: 48,
-          data-set-gpus: 1,
-        ]
-      - [
-          "largemem", "largemem",
-          data-min-cores: 24,
-          data-max-cores: 48,
-          data-set-gpus: 0,
+          data-max-cores: 28,
         ]
       - [
           "hugemem", "hugemem",
-          data-min-cores: 20,
-          data-max-cores: 80,
-          data-set-gpus: 0,
+          data-min-cores: 4,
+          data-max-cores: 48,
         ]
 submit: submit/slurm.yml.erb
\ No newline at end of file
diff --git a/ondemand.osc.edu/apps/bc_desktop/cardinal.yml.erb b/ondemand.osc.edu/apps/bc_desktop/cardinal.yml.erb
index 36c733c..0dbe222 100644
--- a/ondemand.osc.edu/apps/bc_desktop/cardinal.yml.erb
+++ b/ondemand.osc.edu/apps/bc_desktop/cardinal.yml.erb
@@ -68,82 +68,37 @@ attributes:
     widget: select
     label: "Node type"
     help: |
-      - **Standard Compute**<br>
-        These are standard HPC machines. There are 224 with 40 cores and
-        340 with 48. They all have 192 GB of RAM. Chosing any will decrease
-        your wait time.
-      - **GPU Enabled**<br>
-        These are HPC machines with [NVIDIA Tesla V100 GPUs]. They have the same
-        40 core machines have 2 GPUs with 16 GB of RAM and 48 core machines have 2
-        with 32 GB of RAM. Densegpu types have 4 GPUs with 16 GB of RAM.
-        Visualization nodes are GPU enabled nodes with an X Server in the background
-        for 3D visualization using VirtualGL.
-      - **Large Memory**<br>
-        These are HPC machines with very large amounts of memory. Largmem nodes
-        have 48 cores with 768 GB of RAM. Hugemem nodes have 80 cores with 3 TB of RAM.
-
-      Visit the OSC site for more [detailed information on the Pitzer cluster].
-      [detailed information on the Pitzer cluster]: https://www.osc.edu/resources/technical_support/supercomputers/pitzer
-      [NVIDIA Tesla V100 GPUs]: https://www.nvidia.com/en-us/data-center/v100/
+      - **any** - (*28 cores*) Chooses any one of the available Cardinal nodes.
+        This reduces the wait time because the job has no special requirements.
+      - **vis** - (*28 cores*) This node includes an NVIDIA Tesla P100 GPU with
+        an X server running in the background. This allows for hardware
+        rendering with the GPU, typically needed for 3D visualization using
+        VirtualGL. There are currently only 10 of these nodes on Owens.
+      - **gpu** - (*28 cores*) This node includes an NVIDIA Tesla P100 GPU
+        allowing for CUDA computations. There are currently only 160 of these
+        nodes on Cardinal. These nodes don't start an X server, so visualization
+        with hardware rendering is not possible.
+      - **hugemem** - (*48 cores*) This Owens node has 1.5 TB of available RAM
+        as well as 48 cores. There are 16 of these nodes on Owens.
     options:
      - [
          "any", "any",
          data-min-cores: 1,
-         data-max-cores: 80,
-         data-set-gpus: 0,
+         data-max-cores: 28,
        ]
      - [
-         "40 core", "any-40core",
+         "vis", "vis",
          data-min-cores: 1,
-         data-max-cores: 40,
-         data-set-gpus: 0,
+         data-max-cores: 28,
        ]
      - [
-         "48 core", "any-48core",
+         "gpu", "gpu-any",
          data-min-cores: 1,
-         data-max-cores: 48,
-         data-set-gpus: 0,
-       ]
-     - [
-         "any gpu", "gpu-any",
-         data-min-cores: 1,
-         data-max-cores: 48,
-         data-set-gpus: 1,
-       ]
-     - [
-         "40 core with gpu", "gpu-40core",
-         data-min-cores: 1,
-         data-max-cores: 40,
-         data-set-gpus: 1,
-       ]
-     - [
-         "48 core with gpu", "gpu-48core",
-         data-min-cores: 1,
-         data-max-cores: 48,
-         data-set-gpus: 1,
-       ]
-     - [
-         "densegpu", "densegpu",
-         data-min-cores: 1,
-         data-max-cores: 48,
-         data-set-gpus: 4,
-       ]
-     - [
-         "visualization node", "vis",
-         data-min-cores: 1,
-         data-max-cores: 48,
-         data-set-gpus: 1,
-       ]
-     - [
-         "largemem", "largemem",
-         data-min-cores: 24,
-         data-max-cores: 48,
-         data-set-gpus: 0,
+         data-max-cores: 28,
        ]
      - [
          "hugemem", "hugemem",
-         data-min-cores: 20,
-         data-max-cores: 80,
-         data-set-gpus: 0,
+         data-min-cores: 4,
+         data-max-cores: 48,
        ]
 submit: submit/slurm.yml.erb
\ No newline at end of file
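
Note on the `data-min-cores` / `data-max-cores` entries kept in the options above: these are Open OnDemand dynamic form widget attributes, where a selected option adjusts the `min`/`max` of another form field named `cores`. A minimal sketch of what that companion field might look like follows; the real `cores` attribute is defined elsewhere in cardinal.yml.erb and is not part of this diff, so the widget name and values shown are illustrative assumptions only.

```yaml
# Hypothetical sketch only -- not taken from this diff. It assumes the form
# defines a "cores" number field that the node_type options constrain through
# data-min-cores / data-max-cores (OnDemand dynamic form widgets).
attributes:
  cores:
    widget: "number_field"
    label: "Cores"
    value: 1
    min: 1
    max: 28   # raised to 48 automatically when "hugemem" is selected
    step: 1
```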
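
Both hunks leave `submit: submit/slurm.yml.erb` in place, and that template is not shown here. As a rough illustration of how a chosen `node_type` typically reaches Slurm in a batch-connect app, a hypothetical submit template could look like the sketch below; the flag choices, the `vnc` template, and the constraint mapping are assumptions, not the contents of the actual file.

```yaml
# Hypothetical submit/slurm.yml.erb sketch -- the real template is not in this
# diff. Form attribute values (node_type, cores) are available to the ERB.
---
batch_connect:
  template: "vnc"
script:
  native:
    - "--nodes=1"
    - "--ntasks-per-node=<%= cores %>"
    <%- if node_type =~ /gpu|vis/ -%>
    - "--gpus-per-node=1"              # assumed GPU request for gpu/vis nodes
    <%- end -%>
    <%- unless node_type == "any" -%>
    - "--constraint=<%= node_type %>"  # assumed node-type to feature mapping
    <%- end -%>
```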