From 01d6870c02a4ff56b8a3edd5e05d04444213e0b3 Mon Sep 17 00:00:00 2001
From: Sam Swerdlow
Date: Thu, 3 Oct 2024 13:04:58 -0700
Subject: [PATCH] No public description

PiperOrigin-RevId: 681999175
---
 modules/sap_hana/main.tf           | 52 ++++++++++++++++--
 modules/sap_hana/sap_hana.tf       |  4 ++
 modules/sap_hana/variables.tf      | 18 ++++++
 modules/sap_hana_ha/main.tf        | 88 +++++++++++++++++++++++++++---
 modules/sap_hana_ha/sap_hana_ha.tf |  4 ++
 modules/sap_hana_ha/variables.tf   | 18 ++++++
 6 files changed, 171 insertions(+), 13 deletions(-)

diff --git a/modules/sap_hana/main.tf b/modules/sap_hana/main.tf
index 5b163a4b..02bc75c9 100644
--- a/modules/sap_hana/main.tf
+++ b/modules/sap_hana/main.tf
@@ -83,6 +83,8 @@ locals {
   num_data_disks = var.enable_data_striping ? var.number_data_disks : 1
   num_log_disks  = var.enable_log_striping ? var.number_log_disks : 1
 
+  sole_tenant_name_prefix = var.sole_tenant_name_prefix != "" ? var.sole_tenant_name_prefix : "st-${lower(var.sap_hana_sid)}"
+
   # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this.
   # All 'over provisioned' capacity is to go onto the data disk.
   final_disk_type = var.disk_type == "" ? (local.default_hyperdisk_extreme ? "hyperdisk-extreme" : (local.default_hyperdisk_balanced ? "hyperdisk-balanced" : "pd-ssd")) : var.disk_type
@@ -408,6 +410,31 @@ resource "google_compute_address" "sap_hana_worker_ip" {
   address      = length(var.worker_static_ips) > count.index ? var.worker_static_ips[count.index] : ""
 }
 
+################################################################################
+# Sole tenant items
+################################################################################
+resource "google_compute_node_template" "sole_tenant_node_template" {
+  count     = var.sole_tenant_deployment ? 1 : 0
+  name      = "${local.sole_tenant_name_prefix}-node-template"
+  node_type = var.sole_tenant_node_type
+  region    = local.region
+  project   = var.project_id
+}
+
+resource "google_compute_node_group" "sole_tenant_node_group" {
+  count         = var.sole_tenant_deployment ? 1 : 0
+  name          = "${local.sole_tenant_name_prefix}-node-group"
+  node_template = google_compute_node_template.sole_tenant_node_template[0].name
+  zone          = var.zone
+  project       = var.project_id
+  initial_size  = 1
+  autoscaling_policy {
+    mode      = "ON"
+    min_nodes = 1
+    max_nodes = var.sap_hana_scaleout_nodes + 1
+  }
+}
+
 ################################################################################
 # instances
 ################################################################################
@@ -425,9 +452,17 @@ resource "google_compute_instance" "sap_hana_primary_instance" {
   }
 
   dynamic "scheduling" {
-    for_each = local.native_bm ? [1] : []
+    for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
     content {
-      on_host_maintenance = "TERMINATE"
+      on_host_maintenance = local.native_bm ? "TERMINATE" : null
+      dynamic "node_affinities" {
+        for_each = resource.google_compute_node_group.sole_tenant_node_group != null ? [1] : []
+        content {
+          key      = "compute.googleapis.com/node-group-name"
+          operator = "IN"
+          values   = ["${local.sole_tenant_name_prefix}-node-group"]
+        }
+      }
     }
   }
 
@@ -503,7 +538,6 @@ resource "google_compute_instance" "sap_hana_primary_instance" {
     ]
   }
 
-
   dynamic "reservation_affinity" {
     for_each = length(var.reservation_name) > 1 ? [1] : []
     content {
@@ -564,9 +598,17 @@ resource "google_compute_instance" "sap_hana_worker_instances" {
   }
 
   dynamic "scheduling" {
-    for_each = local.native_bm ? [1] : []
+    for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
     content {
-      on_host_maintenance = "TERMINATE"
+      on_host_maintenance = local.native_bm ? "TERMINATE" : null
+      dynamic "node_affinities" {
+        for_each = resource.google_compute_node_group.sole_tenant_node_group != null ? [1] : []
+        content {
+          key      = "compute.googleapis.com/node-group-name"
+          operator = "IN"
+          values   = ["${local.sole_tenant_name_prefix}-node-group"]
+        }
+      }
     }
   }
 
diff --git a/modules/sap_hana/sap_hana.tf b/modules/sap_hana/sap_hana.tf
index e79c0560..29c0a9cd 100644
--- a/modules/sap_hana/sap_hana.tf
+++ b/modules/sap_hana/sap_hana.tf
@@ -77,4 +77,8 @@ module "sap_hana" {
   # backup_disk_type = "DISK_TYPE" # default is pd-ssd, except for machines that do not support PD, in which case the default is hyperdisk-extreme. Valid types are "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme".
   # enable_fast_restart = true_or_false # default is true, whether to enable HANA Fast Restart
   # enable_data_striping = true_or_false # default is false. Enable LVM striping of data volume across multiple disks. Data striping is only intended for cases where the machine level limits are higher than the hyperdisk disk level limits. Refer to https://cloud.google.com/compute/docs/disks/hyperdisks#hd-performance-limits
+
+  # sole_tenant_deployment = true_or_false # default is false. Whether to deploy on Sole Tenant Nodes.
+  # sole_tenant_node_type = "NODE_TYPE" # Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types
+  # sole_tenant_name_prefix = "PREFIX" # prefix to use for the names of the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used.
 }
diff --git a/modules/sap_hana/variables.tf b/modules/sap_hana/variables.tf
index 56cdf837..2c8fb045 100644
--- a/modules/sap_hana/variables.tf
+++ b/modules/sap_hana/variables.tf
@@ -344,6 +344,24 @@ variable "enable_data_striping" {
   default     = false
 }
 
+variable "sole_tenant_deployment" {
+  type        = bool
+  description = "Optional - default is false. Deploy on Sole Tenant Nodes."
+  default     = false
+}
+
+variable "sole_tenant_node_type" {
+  type        = string
+  description = "Optional - default is null. Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types"
+  default     = null
+}
+
+variable "sole_tenant_name_prefix" {
+  type        = string
+  description = "Optional - prefix to use for the names of the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used."
+  default     = ""
+}
+
 #
 # DO NOT MODIFY unless instructed or aware of the implications of using those settings
 #
diff --git a/modules/sap_hana_ha/main.tf b/modules/sap_hana_ha/main.tf
index 1e82ea8e..e9333e78 100644
--- a/modules/sap_hana_ha/main.tf
+++ b/modules/sap_hana_ha/main.tf
@@ -83,6 +83,8 @@ locals {
   num_data_disks = var.enable_data_striping ? var.number_data_disks : 1
   num_log_disks  = var.enable_log_striping ? var.number_log_disks : 1
 
+  sole_tenant_name_prefix = var.sole_tenant_name_prefix != "" ? var.sole_tenant_name_prefix : "st-${lower(var.sap_hana_sid)}"
+
   # Minimum disk sizes are used to ensure throughput. Extreme disks don't need this.
   # All 'over provisioned' capacity is to go onto the data disk.
   final_disk_type = var.disk_type == "" ? (local.default_hyperdisk_extreme ? "hyperdisk-extreme" : (local.default_hyperdisk_balanced ? "hyperdisk-balanced" : "pd-ssd")) : var.disk_type
@@ -357,6 +359,44 @@ resource "google_compute_address" "sap_hana_ha_worker_vm_ip" {
     length(var.secondary_worker_static_ips) > floor(count.index / 2) ? var.secondary_worker_static_ips[floor(count.index / 2)] : "")
 }
 
+################################################################################
+# Sole tenant items
+################################################################################
+resource "google_compute_node_template" "sole_tenant_node_template" {
+  count     = var.sole_tenant_deployment ? 1 : 0
+  name      = "${local.sole_tenant_name_prefix}-node-template"
+  node_type = var.sole_tenant_node_type
+  region    = local.region
+  project   = var.project_id
+}
+
+resource "google_compute_node_group" "sole_tenant_primary_node_group" {
+  count         = var.sole_tenant_deployment ? 1 : 0
+  name          = "${local.sole_tenant_name_prefix}-primary-node-group"
+  node_template = google_compute_node_template.sole_tenant_node_template[0].name
+  zone          = var.primary_zone
+  project       = var.project_id
+  initial_size  = 1
+  autoscaling_policy {
+    mode      = "ON"
+    min_nodes = 1
+    max_nodes = var.sap_hana_scaleout_nodes + 1
+  }
+}
+
+resource "google_compute_node_group" "sole_tenant_secondary_node_group" {
+  count         = var.sole_tenant_deployment ? 1 : 0
+  name          = "${local.sole_tenant_name_prefix}-secondary-node-group"
+  node_template = google_compute_node_template.sole_tenant_node_template[0].name
+  zone          = var.secondary_zone
+  project       = var.project_id
+  initial_size  = 1
+  autoscaling_policy {
+    mode      = "ON"
+    min_nodes = 1
+    max_nodes = var.sap_hana_scaleout_nodes + 1
+  }
+}
 ################################################################################
 # Primary Instance
 ################################################################################
@@ -472,9 +512,17 @@ resource "google_compute_instance" "sap_hana_ha_primary_instance" {
   }
 
   dynamic "scheduling" {
-    for_each = local.native_bm ? [1] : []
+    for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
     content {
-      on_host_maintenance = "TERMINATE"
+      on_host_maintenance = local.native_bm ? "TERMINATE" : null
+      dynamic "node_affinities" {
+        for_each = resource.google_compute_node_group.sole_tenant_primary_node_group != null ? [1] : []
+        content {
+          key      = "compute.googleapis.com/node-group-name"
+          operator = "IN"
+          values   = ["${local.sole_tenant_name_prefix}-primary-node-group"]
+        }
+      }
     }
   }
 
@@ -624,9 +672,17 @@ resource "google_compute_instance" "sap_hana_ha_primary_workers" {
   }
 
   dynamic "scheduling" {
-    for_each = local.native_bm ? [1] : []
+    for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
     content {
-      on_host_maintenance = "TERMINATE"
+      on_host_maintenance = local.native_bm ? "TERMINATE" : null
+      dynamic "node_affinities" {
+        for_each = resource.google_compute_node_group.sole_tenant_primary_node_group != null ? [1] : []
+        content {
+          key      = "compute.googleapis.com/node-group-name"
+          operator = "IN"
+          values   = ["${local.sole_tenant_name_prefix}-primary-node-group"]
+        }
+      }
     }
   }
 
@@ -857,9 +913,17 @@ resource "google_compute_instance" "sap_hana_ha_secondary_instance" {
   }
 
   dynamic "scheduling" {
-    for_each = local.native_bm ? [1] : []
+    for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
     content {
-      on_host_maintenance = "TERMINATE"
+      on_host_maintenance = local.native_bm ? "TERMINATE" : null
+      dynamic "node_affinities" {
+        for_each = resource.google_compute_node_group.sole_tenant_secondary_node_group != null ? [1] : []
+        content {
+          key      = "compute.googleapis.com/node-group-name"
+          operator = "IN"
+          values   = ["${local.sole_tenant_name_prefix}-secondary-node-group"]
+        }
+      }
     }
   }
 
@@ -1008,9 +1072,17 @@ resource "google_compute_instance" "sap_hana_ha_secondary_workers" {
   }
 
   dynamic "scheduling" {
-    for_each = local.native_bm ? [1] : []
+    for_each = (local.native_bm || var.sole_tenant_deployment) ? [1] : []
     content {
-      on_host_maintenance = "TERMINATE"
+      on_host_maintenance = local.native_bm ? "TERMINATE" : null
+      dynamic "node_affinities" {
+        for_each = resource.google_compute_node_group.sole_tenant_secondary_node_group != null ? [1] : []
+        content {
+          key      = "compute.googleapis.com/node-group-name"
+          operator = "IN"
+          values   = ["${local.sole_tenant_name_prefix}-secondary-node-group"]
+        }
+      }
     }
   }
 
diff --git a/modules/sap_hana_ha/sap_hana_ha.tf b/modules/sap_hana_ha/sap_hana_ha.tf
index f5db4562..f4f7bfd1 100644
--- a/modules/sap_hana_ha/sap_hana_ha.tf
+++ b/modules/sap_hana_ha/sap_hana_ha.tf
@@ -92,4 +92,8 @@ module "sap_hana_ha" {
   # backup_disk_type = "DISK_TYPE" # default is pd-ssd, except for machines that do not support PD, in which case the default is hyperdisk-extreme. Valid types are "pd-ssd", "pd-balanced", "pd-standard", "pd-extreme", "hyperdisk-balanced", "hyperdisk-extreme".
   # enable_fast_restart = true_or_false # default is true, whether to enable HANA Fast Restart
   # enable_data_striping = true_or_false # default is false. Enable LVM striping of data volume across multiple disks. Data striping is only intended for cases where the machine level limits are higher than the hyperdisk disk level limits. Refer to https://cloud.google.com/compute/docs/disks/hyperdisks#hd-performance-limits
+
+  # sole_tenant_deployment = true_or_false # default is false. Whether to deploy on Sole Tenant Nodes.
+  # sole_tenant_node_type = "NODE_TYPE" # Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types
+  # sole_tenant_name_prefix = "PREFIX" # prefix to use for the names of the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used.
 }
diff --git a/modules/sap_hana_ha/variables.tf b/modules/sap_hana_ha/variables.tf
index 7fc35723..d05455d4 100644
--- a/modules/sap_hana_ha/variables.tf
+++ b/modules/sap_hana_ha/variables.tf
@@ -390,6 +390,24 @@ variable "enable_data_striping" {
   default     = false
 }
 
+variable "sole_tenant_deployment" {
+  type        = bool
+  description = "Optional - default is false. Deploy on Sole Tenant Nodes."
+  default     = false
+}
+
+variable "sole_tenant_node_type" {
+  type        = string
+  description = "Optional - default is null. Sole Tenant Node Type to use. See https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types"
+  default     = null
+}
+
+variable "sole_tenant_name_prefix" {
+  type        = string
+  description = "Optional - prefix to use for the names of the Sole Tenant objects (Node Templates, Node Groups). If left blank with sole_tenant_deployment=true, st-<sap_hana_sid> will be used."
+  default     = ""
+}
+
 #
 # DO NOT MODIFY unless instructed or aware of the implications of using those settings
 #
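
Usage sketch (illustrative only): a minimal caller-side example of how the new sole_tenant_* inputs introduced by this patch might be set on the sap_hana module. The module source, project, zone, SID, machine type, and node type below are placeholder values, not values taken from this change, and the other required module inputs (instance name, subnetwork, image, and so on) are omitted for brevity.

module "sap_hana" {
  source = "PATH_OR_URL_TO_SAP_HANA_MODULE" # placeholder; keep the module source your deployment already uses

  project_id   = "example-project" # placeholder
  zone         = "us-central1-a"   # placeholder
  machine_type = "n2-highmem-80"   # placeholder
  sap_hana_sid = "HDB"             # placeholder

  # New inputs from this change: deploy the HANA instances onto sole-tenant nodes.
  sole_tenant_deployment  = true
  sole_tenant_node_type   = "n2-node-80-640" # example node type; see https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_types
  sole_tenant_name_prefix = ""               # left blank, so names default to "st-hdb" (st-<sap_hana_sid>, lowercased)
}

With sole_tenant_deployment = true, the module creates a node template and an autoscaling node group (sized for the primary instance plus any scale-out workers) and pins the VMs to that group through a "compute.googleapis.com/node-group-name" node affinity in the scheduling block, as the diff above shows.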