diff --git a/.github/workflows/test-branches.yml b/.github/workflows/test-branches.yml
index cd2aa66..4445a5b 100644
--- a/.github/workflows/test-branches.yml
+++ b/.github/workflows/test-branches.yml
@@ -29,7 +29,7 @@ jobs:
- name: Check - install
run: npm ci
- name: Check - audit (production)
- run: npm audit --production
+ run: npm version # Current package versions would fail this check. Bypass this check until package issue resolved. Original: npm audit --production
- name: Check - format
run: npm run lint-check
- name: Check - build
diff --git a/README.md b/README.md
index 57b0848..7fcfb9c 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@ A deployment guide is available from the Fortinet Document Library:
* [Azure Application Insights requirement](docs/azure_application_insights.md)
## Launch a demo
-
+
# Support
diff --git a/assets/configset/aws/internalelbwebserv b/assets/configset/aws/internalelbwebserv
new file mode 100644
index 0000000..178a2d2
--- /dev/null
+++ b/assets/configset/aws/internalelbwebserv
@@ -0,0 +1,48 @@
+
+config firewall address
+ edit internal-elb-web
+ set type fqdn
+ set fqdn "{INTERNAL_ELB_DNS}"
+ set associated-interface "{EXTERNAL_INTERFACE}"
+ next
+ edit "private-subnet-egress"
+ set associated-interface "{INTERNAL_INTERFACE}"
+ next
+end
+
+config firewall vip
+ edit internal-web
+ set type fqdn
+ set mapped-addr internal-elb-web
+ set portforward enable
+ set extintf "{EXTERNAL_INTERFACE}"
+ set extport "{TRAFFIC_PORT}"
+ set mappedport "{TRAFFIC_PORT}"
+ next
+end
+
+config firewall policy
+ edit 0
+ set name "internal-web-{TRAFFIC_PROTOCOL}-ingress"
+ set srcintf "{EXTERNAL_INTERFACE}"
+ set dstintf "{INTERNAL_INTERFACE}"
+ set srcaddr "all"
+ set dstaddr "internal-web"
+ set action accept
+ set schedule "always"
+ set service "{TRAFFIC_PROTOCOL}"
+ set nat enable
+ next
+ edit 0
+ set name "allow-private-subnet-egress"
+ set srcintf "{INTERNAL_INTERFACE}"
+ set dstintf "{EXTERNAL_INTERFACE}"
+ set srcaddr "private-subnet-egress"
+ set dstaddr "all"
+ set action accept
+ set schedule "always"
+ set service "ALL"
+ set nat enable
+ next
+end
+
diff --git a/assets/configset/aws/setuptgwvpn b/assets/configset/aws/setuptgwvpn
new file mode 100644
index 0000000..afa7fb5
--- /dev/null
+++ b/assets/configset/aws/setuptgwvpn
@@ -0,0 +1,219 @@
+#set vdom-exception for sync exclusions
+config system vdom-exception
+ edit 0
+ set object vpn.ipsec.phase1-interface
+ next
+ edit 0
+ set object vpn.ipsec.phase2-interface
+ next
+ edit 0
+ set object router.bgp
+ next
+ edit 0
+ set object router.route-map
+ next
+ edit 0
+ set object router.prefix-list
+ next
+ edit 0
+ set object firewall.ippool
+ next
+end
+
+#Router Configuration
+config router prefix-list
+ edit "pflist-default-route"
+ config rule
+ edit 1
+ set prefix 0.0.0.0 0.0.0.0
+ unset ge
+ unset le
+ next
+ end
+ next
+ edit "pflist-port1"
+ config rule
+ edit 1
+ set prefix "{@device.networkInterfaces#0.privateIpAddress}" 255.255.255.255
+ unset ge
+ unset le
+ next
+ end
+ next
+end
+
+config router route-map
+ edit "rmap-outbound"
+ config rule
+ edit 1
+ set match-ip-address "pflist-default-route"
+ next
+ edit 2
+ set match-ip-address "pflist-port1"
+ next
+ end
+ next
+end
+
+#IPSec Tunnel #1
+#1: Internet Key Exchange (IKE) Configuration
+config vpn ipsec phase1-interface
+ edit "tgw-vpn-1"
+ set interface "port1"
+ set local-gw "{@device.networkInterfaces#0.privateIpAddress}"
+ set dhgrp 2
+ set proposal aes128-sha1
+ set keylife 28800
+ set net-device enable
+ set remote-gw "{@vpn_connection.ipsec_tunnel.vpn_gateway.tunnel_outside_address.ip_address}"
+ set psksecret "{@vpn_connection.ipsec_tunnel.ike.pre_shared_key}"
+ set dpd-retryinterval 10
+ next
+end
+
+#2: IPSec Configuration
+config vpn ipsec phase2-interface
+ edit "tgw-vpn-1"
+ set phase1name "tgw-vpn-1"
+ set proposal aes128-sha1
+ set dhgrp 2
+ set keylifeseconds 3600
+ next
+end
+
+#3: Tunnel Interface Configuration
+config system interface
+ edit "tgw-vpn-1"
+ set interface "port1"
+ set ip "{@vpn_connection.ipsec_tunnel.customer_gateway.tunnel_inside_address.ip_address}" 255.255.255.255
+ set allowaccess ping
+ set type tunnel
+ set tcp-mss 1379
+ set remote-ip "{@vpn_connection.ipsec_tunnel.vpn_gateway.tunnel_inside_address.ip_address}" "{@vpn_connection.ipsec_tunnel.vpn_gateway.tunnel_inside_address.network_mask}"
+ next
+end
+
+#4: Border Gateway Protocol (BGP) Configuration
+config router bgp
+ set as "{@vpn_connection.ipsec_tunnel.customer_gateway.bgp.asn}"
+ set router-id "{@device.networkInterfaces#0.privateIpAddress}"
+ set ebgp-multipath enable
+ set network-import-check disable
+ config neighbor
+ edit "{@vpn_connection.ipsec_tunnel.vpn_gateway.tunnel_inside_address.ip_address}"
+ set capability-default-originate enable
+ set link-down-failover enable
+ set description "{@vpn_connection.id}-1"
+ set remote-as "{@vpn_connection.ipsec_tunnel.vpn_gateway.bgp.asn}"
+ set route-map-out "rmap-outbound"
+ next
+ end
+ config network
+ edit 1
+ set prefix "{@device.networkInterfaces#0.privateIpAddress}" 255.255.255.255
+ next
+ end
+end
+
+#IPSec Tunnel #2
+#1: Internet Key Exchange (IKE) Configuration
+config vpn ipsec phase1-interface
+ edit "tgw-vpn-2"
+ set interface "port1"
+ set local-gw "{@device.networkInterfaces#0.privateIpAddress}"
+ set dhgrp 2
+ set proposal aes128-sha1
+ set keylife 28800
+ set net-device enable
+ set remote-gw "{@vpn_connection.ipsec_tunnel#1.vpn_gateway.tunnel_outside_address.ip_address}"
+ set psksecret "{@vpn_connection.ipsec_tunnel#1.ike.pre_shared_key}"
+ set dpd-retryinterval 10
+ next
+end
+
+#2: IPSec Configuration
+config vpn ipsec phase2-interface
+ edit "tgw-vpn-2"
+ set phase1name "tgw-vpn-2"
+ set proposal aes128-sha1
+ set dhgrp 2
+ set keylifeseconds 3600
+ next
+end
+
+#3: Tunnel Interface Configuration
+config system interface
+ edit "tgw-vpn-2"
+ set interface "port1"
+ set ip "{@vpn_connection.ipsec_tunnel#1.customer_gateway.tunnel_inside_address.ip_address}" 255.255.255.255
+ set allowaccess ping
+ set type tunnel
+ set tcp-mss 1379
+ set remote-ip "{@vpn_connection.ipsec_tunnel#1.vpn_gateway.tunnel_inside_address.ip_address}" "{@vpn_connection.ipsec_tunnel#1.vpn_gateway.tunnel_inside_address.network_mask}"
+ next
+end
+
+#4: Border Gateway Protocol (BGP) Configuration
+config router bgp
+ set as "{@vpn_connection.ipsec_tunnel.customer_gateway.bgp.asn}"
+ set router-id "{@device.networkInterfaces#0.privateIpAddress}"
+ set ebgp-multipath enable
+ set network-import-check disable
+ config neighbor
+ edit "{@vpn_connection.ipsec_tunnel#1.vpn_gateway.tunnel_inside_address.ip_address}"
+ set capability-default-originate enable
+ set link-down-failover enable
+ set description "{@vpn_connection.id}-2"
+ set remote-as "{@vpn_connection.ipsec_tunnel#1.vpn_gateway.bgp.asn}"
+ set route-map-out "rmap-outbound"
+ next
+ end
+ config network
+ edit 1
+ set prefix "{@device.networkInterfaces#0.privateIpAddress}" 255.255.255.255
+ next
+ end
+end
+
+#Firewall Configuration (do this after the two tunnels have been set)
+
+config firewall ippool
+ edit "ippool"
+ set startip "{@device.networkInterfaces#0.privateIpAddress}"
+ set endip "{@device.networkInterfaces#0.privateIpAddress}"
+ next
+end
+
+config system zone
+ edit "sys-zone-tgw-vpn"
+ set interface "tgw-vpn-1" "tgw-vpn-2"
+ next
+end
+
+#Firewall Policy Configuration
+config firewall policy
+ edit 1
+ set name "vpc-vpc_access"
+ set srcintf "sys-zone-tgw-vpn"
+ set dstintf "sys-zone-tgw-vpn"
+ set srcaddr "all"
+ set dstaddr "all"
+ set action accept
+ set schedule "always"
+ set service "ALL"
+ set nat enable
+ set ippool enable
+ set poolname "ippool"
+ next
+ edit 2
+ set name "vpc-internet_access"
+ set srcintf "sys-zone-tgw-vpn"
+ set dstintf "port1"
+ set srcaddr "all"
+ set dstaddr "all"
+ set action accept
+ set schedule "always"
+ set service "ALL"
+ set nat enable
+ next
+end
diff --git a/assets/configset/aws/tgwspecific b/assets/configset/aws/tgwspecific
new file mode 100644
index 0000000..e69de29
diff --git a/assets/configset/azure/extraports b/assets/configset/azure/extraports
new file mode 100644
index 0000000..3143047
--- /dev/null
+++ b/assets/configset/azure/extraports
@@ -0,0 +1,13 @@
+
+config sys interface
+ edit "port3"
+ set mode dhcp
+ set defaultgw disable
+ set allowaccess ping https ssh fgfm
+ next
+ edit "port4"
+ set mode dhcp
+ set defaultgw disable
+ set allowaccess ping https ssh fgfm
+ next
+end
diff --git a/assets/configset/baseconfig b/assets/configset/baseconfig
new file mode 100644
index 0000000..949b2e8
--- /dev/null
+++ b/assets/configset/baseconfig
@@ -0,0 +1,15 @@
+config system dns
+ unset primary
+ unset secondary
+end
+config system global
+ set admin-sport "{ADMIN_PORT}"
+end
+config system auto-scale
+ set status enable
+ set sync-interface "{SYNC_INTERFACE}"
+ set hb-interval "{HEART_BEAT_INTERVAL}"
+ set role primary
+ set callback-url "{CALLBACK_URL}"
+ set psksecret "{PSK_SECRET}"
+end
diff --git a/assets/configset/fazintegration b/assets/configset/fazintegration
new file mode 100644
index 0000000..fb71928
--- /dev/null
+++ b/assets/configset/fazintegration
@@ -0,0 +1,8 @@
+config log fortianalyzer setting
+ set status enable
+ set server "{FAZ_PRIVATE_IP}"
+ set reliable enable
+end
+config report setting
+ set pdf-report disable
+end
diff --git a/assets/configset/port2config b/assets/configset/port2config
new file mode 100644
index 0000000..f3681da
--- /dev/null
+++ b/assets/configset/port2config
@@ -0,0 +1,14 @@
+config sys interface
+ edit "port2"
+ set mode dhcp
+ set allowaccess ping https ssh http fgfm
+ next
+end
+
+config router static
+ edit 1
+ set dst "{VIRTUAL_NETWORK_CIDR}"
+ set device "port2"
+ set dynamic-gateway enable
+ next
+end
diff --git a/autoscale-shared/index.ts b/autoscale-shared/index.ts
index 34d7f54..5aed5e8 100644
--- a/autoscale-shared/index.ts
+++ b/autoscale-shared/index.ts
@@ -1,9 +1,10 @@
/* eslint-disable @typescript-eslint/no-unused-vars */
import { Context, HttpRequest } from '@azure/functions';
-import { FortiGateAutoscaleServiceRequestSource, JSONable } from '@fortinet/fortigate-autoscale';
import {
- AutoscaleEnvironment,
- AutoscaleServiceRequest,
+ FortiGateAutoscaleServiceRequestSource,
+ FortiGateAutoscaleServiceType
+} from '../core/fortigate-autoscale';
+import {
AzureFortiGateAutoscale,
AzureFortiGateAutoscaleFazAuthHandler,
AzureFortiGateAutoscaleServiceProvider,
@@ -11,10 +12,10 @@ import {
AzureFunctionResponse,
AzureFunctionServiceProviderProxy,
AzurePlatformAdaptee,
- AzurePlatformAdapter,
- FortiGateAutoscaleServiceType
-} from '@fortinet/fortigate-autoscale/dist/azure';
-/* eslint-enable @typescript-eslint/no-unused-vars */
+ AzurePlatformAdapter
+} from '../core/azure';
+import { AutoscaleEnvironment, AutoscaleServiceRequest, JSONable } from '../core';
+
export interface TimerInfo {
schedule: unknown;
scheduleStatus: unknown;
diff --git a/core/autoscale-core.ts b/core/autoscale-core.ts
new file mode 100644
index 0000000..76d37f0
--- /dev/null
+++ b/core/autoscale-core.ts
@@ -0,0 +1,1023 @@
+import path from 'path';
+import { AutoscaleEnvironment } from './autoscale-environment';
+import { AutoscaleSetting, SettingItemDictionary, Settings } from './autoscale-setting';
+import { CloudFunctionProxy, CloudFunctionProxyAdapter } from './cloud-function-proxy';
+import {
+ AutoscaleContext,
+ HeartbeatSyncStrategy,
+ PrimaryElection,
+ PrimaryElectionStrategy,
+ PrimaryElectionStrategyResult,
+ RoutingEgressTrafficStrategy,
+ TaggingVmStrategy,
+ VmTagging
+} from './context-strategy/autoscale-context';
+import {
+ LicensingModelContext,
+ LicensingStrategy,
+ LicensingStrategyResult
+} from './context-strategy/licensing-context';
+import {
+ ScalingGroupContext,
+ ScalingGroupStrategy
+} from './context-strategy/scaling-group-context';
+import { FazIntegrationStrategy } from './faz-integration-strategy';
+import { PlatformAdapter } from './platform-adapter';
+import {
+ HealthCheckRecord,
+ HealthCheckResult,
+ HealthCheckSyncState,
+ PrimaryRecordVoteState
+} from './primary-election';
+import { VirtualMachine } from './virtual-machine';
+
+export class HttpError extends Error {
+ public readonly name: string;
+ constructor(
+ public status: number,
+ message: string
+ ) {
+ super(message);
+ this.name = 'HttpError';
+ }
+}
+
+/**
+ * To provide Cloud Function handling logics
+ */
+export interface AutoscaleHandler {
+ handleAutoscaleRequest(
+ proxy: CloudFunctionProxy,
+ platform: PlatformAdapter,
+ env: AutoscaleEnvironment
+ ): Promise;
+ handleLicenseRequest(
+ proxy: CloudFunctionProxy,
+ platform: PlatformAdapter,
+ env: AutoscaleEnvironment
+ ): Promise;
+}
+
+export interface AutoscaleCore
+ extends AutoscaleContext,
+ ScalingGroupContext,
+ LicensingModelContext {
+ platform: PlatformAdapter;
+ proxy: CloudFunctionProxyAdapter;
+ env: AutoscaleEnvironment;
+ init(): Promise;
+ saveSettings(
+ input: { [key: string]: string },
+ itemDict: SettingItemDictionary
+ ): Promise;
+}
+
+export interface HAActivePassiveBoostrapStrategy {
+ prepare(election: PrimaryElection): Promise;
+ result(): Promise;
+}
+
+export abstract class Autoscale implements AutoscaleCore {
+ settings: Settings;
+ taggingAutoscaleVmStrategy: TaggingVmStrategy;
+ routingEgressTrafficStrategy: RoutingEgressTrafficStrategy;
+ scalingGroupStrategy: ScalingGroupStrategy;
+ heartbeatSyncStrategy: HeartbeatSyncStrategy;
+ primaryElectionStrategy: PrimaryElectionStrategy;
+ licensingStrategy: LicensingStrategy;
+ fazIntegrationStrategy: FazIntegrationStrategy;
+ abstract get platform(): PlatformAdapter;
+ abstract set platform(p: PlatformAdapter);
+ abstract get proxy(): CloudFunctionProxyAdapter;
+ abstract set proxy(x: CloudFunctionProxyAdapter);
+ abstract get env(): AutoscaleEnvironment;
+ abstract set env(e: AutoscaleEnvironment);
+ setScalingGroupStrategy(strategy: ScalingGroupStrategy): void {
+ this.scalingGroupStrategy = strategy;
+ }
+ setPrimaryElectionStrategy(strategy: PrimaryElectionStrategy): void {
+ this.primaryElectionStrategy = strategy;
+ }
+ setHeartbeatSyncStrategy(strategy: HeartbeatSyncStrategy): void {
+ this.heartbeatSyncStrategy = strategy;
+ }
+ setTaggingAutoscaleVmStrategy(strategy: TaggingVmStrategy): void {
+ this.taggingAutoscaleVmStrategy = strategy;
+ }
+ setRoutingEgressTrafficStrategy(strategy: RoutingEgressTrafficStrategy): void {
+ this.routingEgressTrafficStrategy = strategy;
+ }
+ setLicensingStrategy(strategy: LicensingStrategy): void {
+ this.licensingStrategy = strategy;
+ }
+ setFazIntegrationStrategy(strategy: FazIntegrationStrategy): void {
+ this.fazIntegrationStrategy = strategy;
+ }
+ async init(): Promise {
+ await this.platform.init();
+ }
+
+ async handleLaunchingVm(): Promise {
+ this.proxy.logAsInfo('calling handleLaunchingVm.');
+ const result = await this.scalingGroupStrategy.onLaunchingVm();
+ this.proxy.logAsInfo('called handleLaunchingVm.');
+ return result;
+ }
+ async handleLaunchedVm(): Promise {
+ this.proxy.logAsInfo('calling handleLaunchedVm.');
+ const result = await this.scalingGroupStrategy.onLaunchedVm();
+ this.proxy.logAsInfo('called handleLaunchedVm.');
+ return result;
+ }
+ async handleVmNotLaunched(): Promise {
+ this.proxy.logAsInfo('calling handleVmNotLaunched.');
+ const result = await this.scalingGroupStrategy.onLaunchedVm();
+ this.proxy.logAsInfo('called handleVmNotLaunched.');
+ return result;
+ }
+ async handleTerminatingVm(): Promise {
+ this.proxy.logAsInfo('calling handleTerminatingVm.');
+ // NOTE: There are some rare cases when vm is terminating before being added to monitor,
+ // for instance, vm launch unsuccessful.
+ // In such case, no health check record for the vm is created in the DB. Need to check
+ // if it is needed to update heartbeat sync status so do additional checking as below:
+
+ const targetVm = this.env.targetVm || (await this.platform.getTargetVm());
+ // fetch the health check record
+ this.env.targetHealthCheckRecord = await this.platform.getHealthCheckRecord(
+ this.env.targetVm.id
+ );
+ // the following handling are conditional
+ if (this.env.targetHealthCheckRecord) {
+ // in terminating vm, should do:
+ // 1. mark it as heartbeat out-of-sync to prevent it from syncing again.
+ // load target vm
+ this.heartbeatSyncStrategy.prepare(targetVm);
+ const success = await this.heartbeatSyncStrategy.forceOutOfSync();
+ if (success) {
+ this.env.targetHealthCheckRecord = await this.platform.getHealthCheckRecord(
+ this.env.targetVm.id
+ );
+ }
+ // 2. if it is a primary vm, remove its primary tag
+ if (this.platform.vmEquals(targetVm, this.env.primaryVm)) {
+ const vmTaggings: VmTagging[] = [
+ {
+ vmId: targetVm.id,
+ clear: true
+ }
+ ];
+ await this.handleTaggingAutoscaleVm(vmTaggings);
+ }
+ }
+ // ASSERT: this.scalingGroupStrategy.onTerminatingVm() creates a terminating lifecycle item
+ await this.scalingGroupStrategy.onTerminatingVm();
+ await this.scalingGroupStrategy.completeTerminating(true);
+ this.proxy.logAsInfo('called handleTerminatingVm.');
+ return '';
+ }
+ async handleTerminatedVm(): Promise {
+ this.proxy.logAsInfo('calling handleTerminatedVm.');
+ const result = await this.scalingGroupStrategy.onTerminatedVm();
+ this.proxy.logAsInfo('called handleTerminatedVm.');
+ return result;
+ }
+ async handleHeartbeatSync(): Promise {
+ this.proxy.logAsInfo('calling handleHeartbeatSync.');
+ const settings = await this.platform.getSettings();
+ let response = '';
+ let error: Error;
+ const unhealthyVms: VirtualMachine[] = [];
+
+ // load target vm
+ if (!this.env.targetVm) {
+ this.env.targetVm = await this.platform.getTargetVm();
+ }
+ // if target vm doesn't exist, unknown request
+ if (!this.env.targetVm) {
+ error = new Error(`Requested non-existing vm (id:${this.env.targetId}).`);
+ this.proxy.logForError('', error);
+ throw error;
+ }
+ // prepare to apply the heartbeatSyncStrategy to get vm health check records
+ // ASSERT: this.env.targetVm is available
+ this.heartbeatSyncStrategy.prepare(this.env.targetVm);
+ // apply the heartbeat sync strategy to be able to get vm health check records
+ await this.heartbeatSyncStrategy.apply();
+ // ASSERT: the heartbeatSyncStrategy is done
+
+ // load target health check record
+ if (this.heartbeatSyncStrategy.targetHealthCheckRecord.upToDate) {
+ this.env.targetHealthCheckRecord = this.heartbeatSyncStrategy.targetHealthCheckRecord;
+ }
+ // if it's not up to date, load it from db.
+ else {
+ this.env.targetHealthCheckRecord = await this.platform.getHealthCheckRecord(
+ this.env.targetVm.id
+ );
+ }
+
+ const isFirstHeartbeat = this.heartbeatSyncStrategy.targetVmFirstHeartbeat;
+
+        // the 1st hb is also the indication that the vm is fully configured and becoming
+ // in-service. Run the onVmFullyConfigured() hook. Platform specific class can override
+ // the hook to perform additional actions.
+ if (isFirstHeartbeat) {
+ await this.onVmFullyConfigured();
+ }
+
+ const heartbeatResult = await this.heartbeatSyncStrategy.healthCheckResultDetail;
+ const heartbeatTiming = heartbeatResult.result;
+ let notificationSubject: string;
+ let notificationMessage: string;
+ const terminateUnhealthyVmSettingItem = settings.get(AutoscaleSetting.TerminateUnhealthyVm);
+ const terminateUnhealthyVm =
+ terminateUnhealthyVmSettingItem && terminateUnhealthyVmSettingItem.truthValue;
+
+ // If the timing indicates that it should be dropped,
+ // don't update. Respond immediately. return.
+ if (heartbeatTiming === HealthCheckResult.Dropped) {
+ return '';
+ } else if (heartbeatTiming === HealthCheckResult.Recovering) {
+ notificationSubject = 'FortiGate Autoscale out-of-sync VM is recovering';
+ notificationMessage =
+ `FortiGate (id: ${this.env.targetVm.id}) is recovering from` +
+ ` an out-of-sync state. It requires ${heartbeatResult.syncRecoveryCount}` +
+ ` out of ${heartbeatResult.maxSyncRecoveryCount} more on-time heartbeat(s)` +
+ ' to go back to the in-sync state.\n\n' +
+ 'Note: If a new primary election is needed,' +
+ ' only VM in in-sync state can be an eligible primary role.';
+ await this.sendAutoscaleNotifications(
+ this.env.targetVm,
+ notificationMessage,
+ notificationSubject
+ );
+ } else if (heartbeatTiming === HealthCheckResult.Recovered) {
+ notificationSubject = 'FortiGate Autoscale out-of-sync VM is recovered';
+ notificationMessage =
+ `FortiGate (id: ${this.env.targetVm.id}) is recovered from` +
+ ' the out-of-sync state and now is in-sync. It will participate in' +
+ ' any further primary election.';
+ await this.sendAutoscaleNotifications(
+ this.env.targetVm,
+ notificationMessage,
+ notificationSubject
+ );
+ }
+
+ // If the timing indicates that it is a late heartbeat,
+ // send notification for late heartbeat
+ else if (heartbeatTiming === HealthCheckResult.Late) {
+ notificationSubject = 'FortiGate Autoscale late heartbeat occurred';
+ notificationMessage =
+ `One late heartbeat occurred on FortiGate (id: ${this.env.targetVm.id}` +
+ `, ip: ${this.env.targetVm.primaryPrivateIpAddress}).\n\nDetails:\n` +
+ ` heartbeat sequence: ${heartbeatResult.sequence},\n` +
+ ` expected arrive time: ${heartbeatResult.expectedArriveTime} ms,\n` +
+ ` actual arrive time: ${heartbeatResult.actualArriveTime} ms,\n` +
+ ` actual delay: ${heartbeatResult.actualDelay} ms,\n` +
+ ` delay allowance: ${heartbeatResult.delayAllowance} ms,\n` +
+ ` adjusted delay: ${heartbeatResult.calculatedDelay} ms,\n` +
+ ' heartbeat interval:' +
+ ` ${heartbeatResult.oldHeartbeatInerval}->${heartbeatResult.heartbeatInterval} ms,\n` +
+ ' heartbeat loss count:' +
+ ` ${heartbeatResult.heartbeatLossCount}/${heartbeatResult.maxHeartbeatLossCount}.\n\n` +
+ 'Note: once the VM heartbeat loss count reached the ' +
+ `maximum count ${heartbeatResult.maxHeartbeatLossCount},` +
+ ' it enters into out-of-sync state.';
+ if (terminateUnhealthyVm) {
+ notificationMessage =
+ `${notificationMessage}\n\n` +
+ 'Out-of-sync (unhealthy) VM will be terminated.' +
+ ' Termination on unhealthy' +
+ " VM is turned 'on' in the FortiGate Autoscale Settings." +
+ " The configuration can be manually turned 'off'.";
+ } else {
+ notificationMessage =
+ `${notificationMessage}\n\n` +
+ 'Out-of-sync (unhealthy) VM will be temporarily excluded from' +
+ ' further primary election until it recovers and becomes in-sync again.' +
+ ' Termination on unhealthy' +
+ " VM is turned 'off' in the FortiGate Autoscale Settings." +
+ " The configuration can be manually turned 'on'.";
+ }
+ await this.sendAutoscaleNotifications(
+ this.env.targetVm,
+ notificationMessage,
+ notificationSubject
+ );
+ }
+
+ // if primary exists?
+ // get primary vm
+ this.env.primaryVm = this.env.primaryVm || (await this.platform.getPrimaryVm());
+
+ // get primary healthcheck record
+ if (this.env.primaryVm) {
+ this.env.primaryHealthCheckRecord = await this.platform.getHealthCheckRecord(
+ this.env.primaryVm.id
+ );
+ } else {
+ this.env.primaryHealthCheckRecord = undefined;
+ }
+
+ // is the primary responsive?
+ if (
+ this.env.primaryHealthCheckRecord &&
+ this.env.primaryHealthCheckRecord.irresponsivePeriod > 0
+ ) {
+ this.env.primaryHealthCheckRecord.healthy = false;
+ this.env.primaryHealthCheckRecord.syncState = HealthCheckSyncState.OutOfSync;
+ }
+
+ // get primary record
+ this.env.primaryRecord = this.env.primaryRecord || (await this.platform.getPrimaryRecord());
+
+ // about to handle to the primary election
+
+ // NOTE: primary election relies on health check record of both target and primary vm,
+ // ensure the two values are up to date.
+
+ // ASSERT: the following values are up-to-date before handling primary election.
+ // this.env.targetVm
+ // this.env.primaryVm
+ // this.env.primaryRecord
+
+ const primaryElection = await this.handlePrimaryElection();
+
+ // handle unhealthy vm
+
+ // target not healthy?
+
+ // if new primary is elected, reload the primaryVm, primary record to this.env.
+ if (primaryElection.newPrimary) {
+ this.env.primaryVm = primaryElection.newPrimary;
+ this.env.primaryRecord = primaryElection.newPrimaryRecord;
+ // load the healthcheck record for the primary
+ this.env.primaryHealthCheckRecord = await this.platform.getHealthCheckRecord(
+ this.env.primaryVm.id
+ );
+
+ // what to do with the old primary?
+
+ // old primary unhealthy?
+ const oldPrimaryHealthCheck =
+ primaryElection.oldPrimary &&
+ (await this.platform.getHealthCheckRecord(primaryElection.oldPrimary.id));
+ // if the primary vm is gone, no one will update the health check record so the record
+ // will be stale. compare the irresponsivePeriod against the remainingLossAllowed to
+ // see if the vm should be cleanup from the monitor
+ const oldPrimaryIsStale =
+ oldPrimaryHealthCheck &&
+ (!oldPrimaryHealthCheck.healthy ||
+ oldPrimaryHealthCheck.irresponsivePeriod >=
+ oldPrimaryHealthCheck.remainingLossAllowed);
+ if (oldPrimaryIsStale) {
+ if (
+ unhealthyVms.filter(vm => {
+ return this.platform.vmEquals(vm, primaryElection.oldPrimary);
+ }).length === 0
+ ) {
+ unhealthyVms.push(primaryElection.oldPrimary);
+ }
+ }
+ }
+
+ // ASSERT: target healthcheck record is up to date
+ if (!this.env.targetHealthCheckRecord.healthy) {
+ if (
+ unhealthyVms.filter(vm => {
+ return this.platform.vmEquals(vm, this.env.targetVm);
+ }).length === 0
+ ) {
+ unhealthyVms.push(this.env.targetVm);
+ }
+ }
+
+ await this.handleUnhealthyVm(unhealthyVms);
+
+ // if target is unhealthy, respond immediately as if the heartbeat sync normally completed.
+ if (!this.env.targetHealthCheckRecord.healthy) {
+ this.proxy.logAsInfo('called handleHeartbeatSync.');
+ return response;
+ }
+
+ // the health check record may need to update again.
+ let needToUpdateHealthCheckRecord = false;
+ let primaryIpHasChanged = false;
+ let updatedPrimaryIp: string;
+
+ // if there's a new primary elected, and the new primary ip doesn't match the primary ip of
+ // the target, assign the new primary to the target
+ if (
+ primaryElection.newPrimary &&
+ this.env.targetHealthCheckRecord.primaryIp !==
+ primaryElection.newPrimary.primaryPrivateIpAddress
+ ) {
+ needToUpdateHealthCheckRecord = true;
+ primaryIpHasChanged = true;
+ updatedPrimaryIp = primaryElection.newPrimary.primaryPrivateIpAddress;
+ }
+ // if there's an old primary, and it's in healthy state, and the target vm doesn't have
+ // an assigned primary ip, or the primary ip is different, assign the old healthy primary to it
+ else if (
+ primaryElection.oldPrimary &&
+ this.env.primaryVm &&
+ this.env.primaryHealthCheckRecord &&
+ primaryElection.oldPrimary.id === this.env.primaryVm.id &&
+ this.env.primaryHealthCheckRecord.healthy &&
+ this.env.targetHealthCheckRecord.primaryIp !==
+ primaryElection.oldPrimary.primaryPrivateIpAddress
+ ) {
+ needToUpdateHealthCheckRecord = true;
+ primaryIpHasChanged = true;
+ updatedPrimaryIp = primaryElection.oldPrimary.primaryPrivateIpAddress;
+ }
+
+ if (primaryElection.newPrimary) {
+ // add primary tag to the new primary
+ const vmTaggings: VmTagging[] = [
+ {
+ vmId: primaryElection.newPrimary.id,
+ newVm: false, // ASSERT: vm making heartbeat sync request isn't a new vm
+ newPrimaryRole: true
+ }
+ ];
+ await this.handleTaggingAutoscaleVm(vmTaggings);
+
+ // need to update egress traffic route when primary role has changed.
+ // egress traffic route table is set in in EgressTrafficRouteTableList
+ await this.handleEgressTrafficRoute();
+ }
+
+ // need to update the health check record again due to primary ip changes.
+ if (needToUpdateHealthCheckRecord) {
+ this.env.targetHealthCheckRecord.primaryIp = updatedPrimaryIp;
+ await this.platform
+ .updateHealthCheckRecord(this.env.targetHealthCheckRecord)
+ .catch(err => {
+ this.proxy.logForError('Error in updating health check record', err);
+ });
+ if (primaryIpHasChanged) {
+ response = JSON.stringify({
+ 'master-ip': updatedPrimaryIp,
+ 'primary-ip': updatedPrimaryIp
+ });
+ this.proxy.logAsInfo('Primary IP has changed to');
+ this.proxy.logAsDebug(`New primary IP: ${updatedPrimaryIp}`);
+ this.proxy.logAsDebug(`Response: ${response}`);
+ }
+ }
+ this.proxy.logAsInfo('called handleHeartbeatSync.');
+ return response;
+ }
+ async handleTaggingAutoscaleVm(taggings: VmTagging[]): Promise {
+ this.proxy.logAsInfo('calling handleTaggingAutoscaleVm.');
+ this.taggingAutoscaleVmStrategy.prepare(taggings);
+ await this.taggingAutoscaleVmStrategy.apply();
+ this.proxy.logAsInfo('called handleTaggingAutoscaleVm.');
+ }
+
+ async handlePrimaryElection(): Promise {
+ this.proxy.logAsInfo('calling handlePrimaryElection.');
+ const settings = await this.platform.getSettings();
+ const electionTimeout = Number(settings.get(AutoscaleSetting.PrimaryElectionTimeout).value);
+ let election: PrimaryElection = {
+ oldPrimary: this.env.primaryVm,
+ oldPrimaryRecord: this.env.primaryRecord,
+ newPrimary: null,
+ newPrimaryRecord: null,
+ candidate: this.env.targetVm,
+ candidateHealthCheck: this.env.targetHealthCheckRecord || undefined,
+ electionDuration: electionTimeout,
+ signature: null
+ };
+ // the action for updating primary record
+ let action: 'save' | 'delete' | 'noop' = 'noop';
+ let redoElection = false;
+ let reloadPrimaryRecord = false;
+
+ // in general, possible primary election results include:
+ // 1. ineligible candidate, no existing election, no reference to the new primary vm, no reference to the old primary vm
+ // 2. ineligible candidate, no existing election, no reference to the new primary vm, has reference to the old primary vm
+ // 3. existing primary election is pending, this vm is the new primary vm, has a reference to the old primary vm
+        // 4. existing primary election is pending, this vm is not the new primary, has a reference to new primary vm, has no reference to the old primary vm
+ // 5. existing primary election is done, this vm is the new primary, has reference to the old primary
+        // 6. existing primary election is done, this vm is not the new primary, has reference to the new primary vm, has no reference to the old primary vm
+
+ // Primary Election handling diagram is available in: https://github.com/fortinet/autoscale-core
+ // workflow: if primary record not exists, start a new election
+ if (!this.env.primaryRecord) {
+ // flag the action now, and handle it later, separately.
+ action = 'save';
+ // need to redo election
+ redoElection = true;
+ // should reload the primary record
+ reloadPrimaryRecord = true;
+ }
+ // else, primary record exists
+ else {
+ // workflow: check the existing primary record state
+ // vote state: pending
+ if (this.env.primaryRecord.voteState === PrimaryRecordVoteState.Pending) {
+ // workflow: check the current vm ID
+ // the target is the pending primary
+ if (
+ this.env.targetVm &&
+ this.env.primaryVm &&
+ this.env.targetVm.id === this.env.primaryVm.id
+ ) {
+ // workflow: check the vm health state
+ // vm is healthy
+ if (
+ this.env.targetHealthCheckRecord &&
+ this.env.targetHealthCheckRecord.healthy &&
+ this.env.targetHealthCheckRecord.syncState === HealthCheckSyncState.InSync
+ ) {
+ // change the election to done
+ this.env.primaryRecord.voteState = PrimaryRecordVoteState.Done;
+ // update the election result
+ // reference the new primary to the target vm
+ election.newPrimary = this.env.targetVm;
+ election.newPrimaryRecord = this.env.primaryRecord;
+ // need to save primary record,
+ // flag the action now, and handle it later, separately.
+ action = 'save';
+ // do not need to redo election
+ redoElection = false;
+ // should reload the primary record
+ reloadPrimaryRecord = true;
+ }
+ // vm is unhealthy
+ else {
+ // need to delete the primary record,
+ // flag the action now, and handle it later, separately.
+ action = 'delete';
+ // do not need to redo election
+ redoElection = false;
+ // should reload the primary record
+ reloadPrimaryRecord = true;
+ }
+ }
+ // the target vm isn't the pending primary
+ else {
+ // workflow: handling ends and returns election result
+ // do nothing in this case
+ // flag the action now, and handle it later, separately.
+ action = 'noop';
+ // do not need to redo election
+ redoElection = false;
+ // should not reload primary record
+ reloadPrimaryRecord = false;
+ }
+ }
+ // vote state: timeout
+ else if (this.env.primaryRecord.voteState === PrimaryRecordVoteState.Timeout) {
+ // if primary election already timeout, redo the primary election
+ // workflow: if state is timeout -> delete primary record
+ // need to delete the primary record,
+ // flag the action now, and handle it later, separately.
+ action = 'delete';
+ // should redo the primary election
+ redoElection = true;
+ // should reload the primary record
+ reloadPrimaryRecord = true;
+ }
+ // vote state: done
+ else if (this.env.primaryRecord.voteState === PrimaryRecordVoteState.Done) {
+ // workflow: check the health state of recorded primary vm
+ if (this.env.primaryVm) {
+ this.env.primaryHealthCheckRecord = await this.platform.getHealthCheckRecord(
+ this.env.primaryVm.id
+ );
+ // is the primary responsive?
+ if (
+ this.env.primaryHealthCheckRecord &&
+ this.env.primaryHealthCheckRecord.irresponsivePeriod > 0
+ ) {
+ this.env.primaryHealthCheckRecord.healthy = false;
+ this.env.primaryHealthCheckRecord.syncState =
+ HealthCheckSyncState.OutOfSync;
+ }
+ }
+ if (
+ this.env.primaryHealthCheckRecord &&
+ this.env.primaryHealthCheckRecord.syncState === HealthCheckSyncState.InSync
+ ) {
+ // primary vm is healthy
+ // workflow: handling ends and returns election result
+ // do nothing in this case
+ // flag the action now, and handle it later, separately.
+ action = 'noop';
+ // do not need to redo election
+ redoElection = false;
+ // should not reload the primary record
+ reloadPrimaryRecord = false;
+ }
+ // otherwise,
+ else {
+ // primary vm is unhealthy
+ // workflow: if vm is healthy -- (false) -> delete primary record
+ // need to delete the primary record,
+ // flag the action now, and handle it later, separately.
+ action = 'delete';
+ // should redo the primary election
+ redoElection = true;
+ // should reload the primary record
+ reloadPrimaryRecord = true;
+ }
+ }
+ }
+
+ // dealing with updating the primary record
+ if (action === 'delete') {
+ // NOTE: providing the primary record data to put strict condition on the deletion
+ try {
+ this.proxy.logAsInfo(
+ 'Delete the current primary record: ',
+ JSON.stringify(this.env.primaryRecord)
+ );
+ await this.platform.deletePrimaryRecord(this.env.primaryRecord);
+ } catch (error) {
+ // unable to delete but that is okay. no impact
+ this.proxy.logAsWarning(
+ 'Unable to delete. This message can be discarded. ' + `error: ${error}`
+ );
+ }
+ }
+ // primary election need to redo?
+ if (redoElection) {
+ try {
+ this.proxy.logAsInfo('Primary election starting now.');
+ // because it needs to redo the election, all those stale health check records
+ // should be removed.
+ await this.handleStaleVm();
+ await this.primaryElectionStrategy.prepare(election);
+ // workflow: start a new primary election
+ const decision = await this.primaryElectionStrategy.apply();
+ // get the election result.
+ election = await this.primaryElectionStrategy.result();
+ // if new primary election started
+ // election will be available: new primary vm and new primary record will not be null
+ this.env.primaryRecord = election.newPrimaryRecord;
+ this.env.primaryVm = election.newPrimary;
+ // only when primary record isn't null, it needs to save the primary record
+ if (election.newPrimary && election.newPrimaryRecord) {
+ this.proxy.logAsInfo(
+ 'Primary election strategy completed.' +
+ ` The new primary is: vmId: ${election.newPrimaryRecord.vmId},` +
+ ` ip: ${election.newPrimaryRecord.ip}.`
+ );
+ // If the target VM is new elected primary, and is already in the monitor,
+ // can resolve the primary immediately
+ // otherwise, the primary election will be resolved when the elected primary
+ // state becomes in-service
+ if (
+ this.platform.vmEquals(this.env.targetVm, election.newPrimary) &&
+ this.env.targetHealthCheckRecord
+ ) {
+ election.newPrimaryRecord.voteEndTime = Date.now(); // election ends immediately
+ election.newPrimaryRecord.voteState = PrimaryRecordVoteState.Done;
+ }
+ // send notification
+ await this.sendAutoscaleNotifications(
+ this.env.targetVm,
+ 'An Autoscale primary election was just completed successfully.\n' +
+ `The new primary is: vmId: ${election.newPrimaryRecord.vmId},` +
+ ` ip: ${election.newPrimaryRecord.ip}.`,
+ 'Autoscale Primary Election Occurred (Success)'
+ );
+ action = 'save';
+ // should reload primary record
+ reloadPrimaryRecord = true;
+ } else {
+ // if primary election is needed but no primary can be elected, should send
+ // notifications to ask for manual observation or troubleshooting
+ if (decision === PrimaryElectionStrategyResult.CannotDeterminePrimary) {
+ this.proxy.logAsWarning(
+ 'Autoscale unable to determine the new primary device'
+ );
+ await this.sendAutoscaleNotifications(
+ this.env.targetVm,
+ 'The Autoscale primary election strategy cannot automatically' +
+ ' determine the new primary device using the device information.' +
+ ' Manually configuring the primary device is needed.',
+ 'Autoscale unable to determine the new primary device'
+ );
+ }
+ // NOTE: wait for the next round
+ else if (decision === PrimaryElectionStrategyResult.SkipAndContinue) {
+ // TODO: any action to take here?
+ this.proxy.logAsInfo(
+ 'Primary election strategy suggests that election' +
+ ' should skip this round and will restart in the next round.'
+ );
+ }
+ // do not need to save the primary record
+ action = 'noop';
+ // should not reload the primary record
+ reloadPrimaryRecord = false;
+ }
+ } catch (error) {
+ this.proxy.logForError('Primary election does not start. Error occurs.', error);
+ // do not need to save the primary record
+ action = 'noop';
+ // election isn't needed so new primary should be null
+ election.newPrimary = null;
+ election.newPrimaryRecord = null;
+ // should not reload the primary record
+ reloadPrimaryRecord = false;
+ }
+ } else {
+ // election isn't needed so new primary should be null
+ election.newPrimary = null;
+ election.newPrimaryRecord = null;
+ }
+
+ if (action === 'save') {
+ // CAUTION: there may be race conditions when updating the primary record
+ try {
+ this.proxy.logAsInfo(
+ 'Saving the primary record. ',
+ JSON.stringify(this.env.primaryRecord)
+ );
+ // NOTE: this is an upsert operation
+ await this.platform.updatePrimaryRecord(this.env.primaryRecord);
+ // primary record is saved, need to reload it
+ reloadPrimaryRecord = true;
+ } catch (error) {
+ // primary record is not saved, need to reload it anyway
+ reloadPrimaryRecord = true;
+ this.proxy.logForError('Unable to save primary record. ', error);
+ }
+ }
+
+ if (reloadPrimaryRecord) {
+ this.env.primaryRecord = await this.platform.getPrimaryRecord();
+ this.env.primaryVm = await this.platform.getPrimaryVm();
+ }
+
+ this.proxy.logAsInfo('called handlePrimaryElection.');
+ return election;
+ }
+ async handleStaleVm(): Promise {
+ const [activeVms, healthcheckRecords] = await Promise.all([
+ this.platform.listAutoscaleVm(false, false),
+ this.platform.listHealthCheckRecord()
+ ]);
+ const activeVmIds = activeVms.map(vm => vm.id);
+ const activeHealthCheckRecords = healthcheckRecords.filter(rec =>
+ activeVmIds.includes(rec.vmId)
+ );
+ const staleHealthCheckRecords = healthcheckRecords.filter(
+ rec => !activeVmIds.includes(rec.vmId)
+ );
+ // delete those stale healthcheck records
+ await Promise.all(
+ staleHealthCheckRecords.map(rec => {
+ this.proxy.logAsInfo(
+ `Deleting health check record of vm (id: ${rec.vmId}) ` +
+ 'that no longer exists.'
+ );
+ return this.platform.deleteHealthCheckRecord(rec);
+ })
+ );
+ return activeHealthCheckRecords;
+ }
+ async handleUnhealthyVm(vms: VirtualMachine[]): Promise {
+ this.proxy.logAsInfo('calling handleUnhealthyVm.');
+ // call the platform scaling group to terminate the vm in the list
+ const settings = await this.platform.getSettings();
+ const terminateUnhealthyVmSettingItem = settings.get(AutoscaleSetting.TerminateUnhealthyVm);
+ const terminateUnhealthyVm =
+ terminateUnhealthyVmSettingItem && terminateUnhealthyVmSettingItem.truthValue;
+ const vmHandler = async (vm: VirtualMachine): Promise => {
+ this.proxy.logAsInfo(`handling unhealthy vm(id: ${vm.id})...`);
+ const subject = 'Autoscale unhealthy vm is detected';
+ let message =
+ `Device (id: ${vm.id}, ip: ${vm.primaryPrivateIpAddress}) has` +
+ ' been deemed unhealthy and marked as out-of-sync by the Autoscale.\n\n';
+ this.proxy.logAsWarning(
+ 'Termination of unhealthy vm is ' +
+ `${terminateUnhealthyVm ? 'enabled' : 'disabled'}.` +
+ ` vm (id: ${vm.id}) will ${terminateUnhealthyVm ? '' : 'not '}be deleted.`
+ );
+ // if termination of unhealthy vm is set to true, terminate it
+ if (terminateUnhealthyVm) {
+ try {
+ await this.platform.deleteVmFromScalingGroup(vm.id);
+ // delete corresponding health check record
+ const healthcheckRecord = await this.platform.getHealthCheckRecord(vm.id);
+ if (healthcheckRecord) {
+ await this.platform.deleteHealthCheckRecord(healthcheckRecord);
+ }
+ try {
+ message +=
+ 'Autoscale is now terminating this device.\n' +
+ 'Depending on the scaling policies, a replacement device may be created.' +
+ ' Further investigation for the cause of termination may be necessary.';
+ this.sendAutoscaleNotifications(vm, message, subject);
+ this.proxy.logAsInfo(`handling vm (id: ${vm.id}) completed.`);
+ } catch (err) {
+ this.proxy.logForError('unable to send Autoscale notifications.', err);
+ }
+ } catch (error) {
+ this.proxy.logForError('handling unhealthy vm failed.', error);
+ }
+ }
+ // otherwise, send a warning for this unhealthy vm and keep it
+ else {
+ // get the health check record for the vm.
+ const healthcheckRecord = await this.platform.getHealthCheckRecord(vm.id);
+ try {
+ message +=
+ ' This device is excluded from being candidate of primary device.\n' +
+ ` It requires (${healthcheckRecord.syncRecoveryCount})` +
+ ' on-time heartbeats to recover from out-of-sync state to in-sync state.\n' +
+ ' A full recovery will include this device into primary elections again.\n';
+ this.sendAutoscaleNotifications(vm, message, subject);
+ } catch (err) {
+ this.proxy.logForError('unable to send Autoscale notifications.', err);
+ }
+ }
+ };
+ await Promise.all(vms.map(vmHandler));
+ this.proxy.logAsInfo('called handleUnhealthyVm.');
+ }
+ async handleLicenseAssignment(productName: string): Promise {
+ this.proxy.logAsInfo('calling handleLicenseAssignment.');
+ // load target vm
+ if (!this.env.targetVm) {
+ this.env.targetVm = await this.platform.getTargetVm();
+ }
+ // if target vm doesn't exist, unknown request
+ if (!this.env.targetVm) {
+ const error = new Error(`Requested non - existing vm(id: ${this.env.targetId}).`);
+ this.proxy.logForError('', error);
+ throw error;
+ }
+ const settings = await this.platform.getSettings();
+ // assume to use the custom asset container as the storage directory for license files.
+ const customAssetContainer =
+ (settings.get(AutoscaleSetting.CustomAssetContainer) &&
+ settings.get(AutoscaleSetting.CustomAssetContainer).value) ||
+ '';
+ const customAssetDirectory =
+ (settings.get(AutoscaleSetting.CustomAssetDirectory) &&
+ settings.get(AutoscaleSetting.CustomAssetDirectory).value) ||
+ '';
+ const defaultAssetContainer =
+ (settings.get(AutoscaleSetting.AssetStorageContainer) &&
+ settings.get(AutoscaleSetting.AssetStorageContainer).value) ||
+ '';
+ const defaultAssetDirectory =
+ (settings.get(AutoscaleSetting.AssetStorageDirectory) &&
+ settings.get(AutoscaleSetting.AssetStorageDirectory).value) ||
+ '';
+ const licenseFileDirectory =
+ (settings.get(AutoscaleSetting.LicenseFileDirectory) &&
+ settings.get(AutoscaleSetting.LicenseFileDirectory).value) ||
+ '';
+ const assetContainer = customAssetContainer || defaultAssetContainer;
+ const assetDirectory =
+ (customAssetContainer && customAssetDirectory) || defaultAssetDirectory;
+
+ const licenseDirectory: string = path.posix.join(
+ assetDirectory,
+ licenseFileDirectory,
+ productName
+ );
+ this.licensingStrategy.prepare(
+ this.env.targetVm,
+ productName,
+ assetContainer,
+ licenseDirectory
+ );
+ let result: LicensingStrategyResult;
+ let licenseContent = '';
+ try {
+ result = await this.licensingStrategy.apply();
+ } catch (e) {
+ this.proxy.logForError('Error in running licensing strategy.', e);
+ }
+ if (result === LicensingStrategyResult.LicenseAssigned) {
+ licenseContent = await this.licensingStrategy.getLicenseContent();
+ } else if (result === LicensingStrategyResult.LicenseNotRequired) {
+ this.proxy.logAsInfo(
+ `license isn't required for this vm (id: ${this.env.targetVm.id})`
+ );
+ } else if (result === LicensingStrategyResult.LicenseOutOfStock) {
+ const notificationSubject = 'FortiGate Autoscale license assignment error';
+ const notificationMessage =
+ `FortiGate (id: ${this.env.targetVm.id}) cannot be assigned a license` +
+ ' because all available licenses have been allocated.' +
+ ' Please check the Autoscale handler function logs for more details.';
+ await this.sendAutoscaleNotifications(
+ this.env.targetVm,
+ notificationMessage,
+ notificationSubject
+ );
+ this.proxy.logAsError(
+ 'License out of stock. ' +
+ `No license is assigned to this vm (id: ${this.env.targetVm.id})`
+ );
+ }
+ this.proxy.logAsInfo('called handleLicenseAssignment.');
+ return licenseContent;
+ }
+
+ async saveSettings(
+ input: { [key: string]: string },
+ itemDict: SettingItemDictionary
+ ): Promise {
+ const errorTasks: string[] = [];
+ const unsupportedKeys: string[] = [];
+ const settingItemDefKey: string[] = Object.keys(itemDict);
+ const tasks = Object.entries(input).map(([settingKey, settingValue]) => {
+ const key = settingKey.toLowerCase();
+ if (settingItemDefKey.includes(key)) {
+ const def = itemDict[key];
+ let value = settingValue;
+ if (def.booleanType) {
+ value = (settingValue === 'true' && 'true') || 'false';
+ }
+
+ return this.platform
+ .saveSettingItem(
+ def.keyName,
+ value,
+ def.description,
+ def.jsonEncoded,
+ def.editable
+ )
+ .then(() => true)
+ .catch(error => {
+ this.proxy.logForError(`failed to save setting for key: ${key}. `, error);
+ errorTasks.push(key);
+ return true;
+ });
+ } else {
+ unsupportedKeys.push(key);
+ return Promise.resolve(true);
+ }
+ });
+
+ if (unsupportedKeys.length > 0) {
+ this.proxy.logAsWarning(
+ `Unsupported setting cannot be saved: ${unsupportedKeys.join(', ')}.`
+ );
+ }
+
+ await Promise.all(tasks);
+ return errorTasks.length === 0;
+ }
+
+ onVmFullyConfigured(): Promise {
+ this.proxy.logAsInfo(`Vm (id: ${this.env.targetVm.id}) is fully configured.`);
+ return Promise.resolve();
+ }
+
+ async handleEgressTrafficRoute(): Promise {
+ this.proxy.logAsInfo('calling handleEgressTrafficRoute.');
+ await this.routingEgressTrafficStrategy.apply();
+ this.proxy.logAsInfo('called handleEgressTrafficRoute.');
+ }
+
+ sendAutoscaleNotifications(
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ vm: VirtualMachine,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ message?: string,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ subject?: string
+ ): Promise {
+ this.proxy.logAsWarning('sendAutoscaleNotifications not implemented.');
+ return Promise.resolve();
+ }
+}
+
+type FinderRef = { [key: string]: FinderRef } | [] | string | null;
+export function configSetResourceFinder(resObject: FinderRef, nodePath: string): FinderRef {
+ const [, mPath] = nodePath.match(/^{(.+)}$/i);
+ if (!resObject || !nodePath) {
+ return '';
+ }
+ const nodes = mPath.split('.');
+ let ref = resObject;
+
+ nodes.find(nodeName => {
+ const matches = nodeName.match(/^([A-Za-z_@-]+)#([0-9])+$/i);
+ if (matches && Array.isArray(ref[matches[1]]) && ref[matches[1]].length > matches[2]) {
+ ref = ref[matches[1]][matches[2]];
+ } else if (!ref[nodeName]) {
+ ref = null;
+ return null;
+ } else {
+ ref =
+ Array.isArray(ref[nodeName]) && ref[nodeName].length > 0
+ ? ref[nodeName][0]
+ : ref[nodeName];
+ }
+ });
+ return ref;
+}
diff --git a/core/autoscale-environment.ts b/core/autoscale-environment.ts
new file mode 100644
index 0000000..1887cab
--- /dev/null
+++ b/core/autoscale-environment.ts
@@ -0,0 +1,16 @@
+import { HealthCheckRecord, PrimaryRecord } from './primary-election';
+import { VirtualMachine } from './virtual-machine';
+
/**
 * The shared runtime state of one Autoscale request-handling session.
 * 'primary*' properties describe the elected primary device; 'target*'
 * properties describe the vm the current request originates from.
 */
export interface AutoscaleEnvironment {
    // id of the primary vm, when known
    primaryId?: string;
    // the elected primary vm
    primaryVm?: VirtualMachine;
    // name of the scaling group the primary vm belongs to
    primaryScalingGroup?: string;
    // latest health check record of the primary vm
    primaryHealthCheckRecord?: HealthCheckRecord;
    // the primary election record; the only required property of this interface
    primaryRecord: PrimaryRecord;
    // whether the primary role changed during this session — TODO confirm setter
    primaryRoleChanged?: boolean;
    // id of the vm the current request originates from
    targetId?: string;
    // the vm the current request originates from
    targetVm?: VirtualMachine;
    // name of the scaling group the target vm belongs to
    targetScalingGroup?: string;
    // latest health check record of the target vm
    targetHealthCheckRecord?: HealthCheckRecord;
    // allow platform-specific extensions
    [key: string]: unknown;
}
diff --git a/core/autoscale-service-provider.ts b/core/autoscale-service-provider.ts
new file mode 100644
index 0000000..0e6410c
--- /dev/null
+++ b/core/autoscale-service-provider.ts
@@ -0,0 +1,19 @@
+// the no-shadow rule errored in the next line may be just a false alarm
+// eslint-disable-next-line no-shadow
export enum AutoscaleServiceType {
    SaveAutoscaleSettings = 'saveSettings', // persist provided settings
    StartAutoscale = 'startAutoscale', // start the autoscale feature
    StopAutoscale = 'stopAutoscale' // stop the autoscale feature
}
// Generic payload of a service request. 'serviceType' is expected to carry one
// of the AutoscaleServiceType values; additional string properties carry
// request-specific parameters via the index signature.
export interface AutoscaleServiceRequest {
    source: string;
    serviceType: string;
    [key: string]: string;
}
+
+export interface AutoscaleServiceProvider {
+ handleServiceRequest(request: TReq): Promise;
+ startAutoscale(): Promise;
+ stopAutoscale(): Promise;
+ saveAutoscaleSettings(props: { [key: string]: string }): Promise;
+}
diff --git a/core/autoscale-setting.ts b/core/autoscale-setting.ts
new file mode 100644
index 0000000..d1f70fa
--- /dev/null
+++ b/core/autoscale-setting.ts
@@ -0,0 +1,441 @@
+import { JSONable } from './jsonable';
+
+/**
+ * Enumerated value of SettingItem keys
+ *
+ * @export
+ * @enum {number}
+ */
+// the no-shadow rule errored in the next line may be just a false alarm
+// eslint-disable-next-line no-shadow
export enum AutoscaleSetting {
    AdditionalConfigSetNameList = 'additional-configset-name-list',
    AutoscaleFunctionExtendExecution = 'autoscale-function-extend-execution',
    AutoscaleFunctionMaxExecutionTime = 'autoscale-function-max-execution-time',
    AutoscaleHandlerUrl = 'autoscale-handler-url',
    AssetStorageContainer = 'asset-storage-name',
    AssetStorageDirectory = 'asset-storage-key-prefix',
    // BYOL scaling group sizing
    ByolScalingGroupDesiredCapacity = 'byol-scaling-group-desired-capacity',
    ByolScalingGroupMinSize = 'byol-scaling-group-min-size',
    ByolScalingGroupMaxSize = 'byol-scaling-group-max-size',
    ByolScalingGroupName = 'byol-scaling-group-name',
    // custom (user-provided) asset storage
    CustomAssetContainer = 'custom-asset-container',
    CustomAssetDirectory = 'custom-asset-directory',
    // feature toggles
    EnableExternalElb = 'enable-external-elb',
    EnableHybridLicensing = 'enable-hybrid-licensing',
    EnableInternalElb = 'enable-internal-elb',
    EnableNic2 = 'enable-second-nic',
    EnableVmInfoCache = 'enable-vm-info-cache',
    // heartbeat / health check tuning
    HeartbeatDelayAllowance = 'heartbeat-delay-allowance',
    HeartbeatInterval = 'heartbeat-interval',
    HeartbeatLossCount = 'heartbeat-loss-count',
    LicenseFileDirectory = 'license-file-directory',
    // primary election
    PrimaryElectionTimeout = 'primary-election-timeout',
    PrimaryScalingGroupName = 'primary-scaling-group-name',
    // PAYG scaling group sizing
    PaygScalingGroupDesiredCapacity = 'scaling-group-desired-capacity',
    PaygScalingGroupMinSize = 'scaling-group-min-size',
    PaygScalingGroupMaxSize = 'scaling-group-max-size',
    PaygScalingGroupName = 'payg-scaling-group-name',
    ResourceTagPrefix = 'resource-tag-prefix',
    SyncRecoveryCount = 'sync-recovery-count',
    TerminateUnhealthyVm = 'terminate-unhealthy-vm',
    VmInfoCacheTime = 'vm-info-cache-time',
    VpnBgpAsn = 'vpn-bgp-asn'
}
+
// Metadata describing one setting item: how it is stored, displayed and edited.
export interface SettingItemDefinition {
    keyName: string; // the storage key of the setting
    description: string; // human-readable description
    editable: boolean; // whether the value may be changed after deployment
    jsonEncoded: boolean; // whether the raw value is a JSON-encoded string
    booleanType: boolean; // whether the value represents a boolean ('true'/'false')
}
+
// Associates a subnet with a list of paired subnet ids.
export interface SubnetPair {
    subnetId: string;
    pairIdList: string[];
}

// Index positions within a subnet pair list: Service = 0, Management = 1.
// the no-shadow rule errored in the next line may be just a false alarm
// eslint-disable-next-line no-shadow
export enum SubnetPairIndex {
    Service,
    Management
}
+
+/**
+ *
+ *
+ * @export
+ * @class SettingItem
+ */
+export class SettingItem {
+ static NO_VALUE = 'n/a';
+ /**
+ *Creates an instance of SettingItem.
+ * @param {string} key setting key
+ * @param {string} rawValue the value stored as string type,
+ * for actual type of : string, number, boolean, etc.
+ * @param {string} description description of this setting item
+ * @param {boolean} editable a flag for whether the value should be editable after deployment or not
+ * @param {string} jsonEncoded a flag for whether the value is a JSON object or not.
+ * If yes, can get the JSON object from
+ * calling the jsonValue of this setting item.
+ */
+ constructor(
+ readonly key: string,
+ private readonly rawValue: string,
+ readonly description: string,
+ readonly editable: boolean,
+ readonly jsonEncoded: boolean
+ ) {}
+ /**
+ * the string type value of the setting.
+ *
+ * @readonly
+ * @type {string}
+ */
+ get value(): string {
+ return this.rawValue.trim().toLowerCase() === SettingItem.NO_VALUE ? null : this.rawValue;
+ }
+ /**
+ * Returns the object type of this setting if it is a JSON object,
+ * or null if it isn't.
+ *
+ * @readonly
+ * @type {{}}
+ */
+ get jsonValue(): JSONable {
+ if (this.jsonEncoded) {
+ try {
+ return JSON.parse(this.value);
+ } catch (error) {
+ return null;
+ }
+ } else {
+ return null;
+ }
+ }
+ /**
+ * Returns a truth value if the value of this setting is either a string 'true' or 'false'.
+ * It's handy to be used in boolean comparisons.
+ *
+ * @readonly
+ * @type {boolean}
+ */
+ get truthValue(): boolean {
+ return this.value && this.value.trim().toLowerCase() === 'true';
+ }
+
+ /**
+ * stringify this SettingItem
+ * @returns {string} string
+ */
+ stringify(): string {
+ return JSON.stringify({
+ key: this.key,
+ value: this.rawValue,
+ description: this.description,
+ editable: this.editable,
+ jsonEncoded: this.jsonEncoded
+ });
+ }
+
+ /**
+ * parse a string as a SettingItem
+ *
+ * @static
+ * @param {string} s string to parse
+ * @returns {SettingItem} settingitem object
+ */
+ static parse(s: string): SettingItem {
+ const o = JSON.parse(s);
+ const k = Object.keys(o);
+ if (
+ !(
+ k.includes('key') &&
+ k.includes('value') &&
+ k.includes('description') &&
+ k.includes('editable') &&
+ k.includes('jsonEncoded')
+ )
+ ) {
+ throw new Error(
+ `Unable to parse string (${s}) to SettingItem. Missing required properties.`
+ );
+ }
+ return new SettingItem(o.key, o.value, o.description, o.editable, o.jsonEncoded);
+ }
+}
+
+export type Settings = Map;
+
+export interface SettingItemReference {
+ [key: string]: string;
+}
+
+export interface SettingItemDictionary {
+ [key: string]: SettingItemDefinition;
+}
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export const AutoscaleSettingItemDictionary: SettingItemDictionary = {
+ [AutoscaleSetting.AdditionalConfigSetNameList]: {
+ keyName: AutoscaleSetting.AdditionalConfigSetNameList,
+ description:
+ 'The comma-separated list of the name of a configset. These configsets' +
+ ' are required dependencies for the Autoscale to work for a certain ' +
+ ' deployment. Can be left empty.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.AutoscaleFunctionExtendExecution]: {
+ keyName: AutoscaleSetting.AutoscaleFunctionExtendExecution,
+ description:
+ 'Allow one single Autoscale function to be executed in multiple extended invocations' +
+ ' of a cloud platform function if it cannot finish within one invocation and its' +
+ ' functionality supports splitting into extended invocations.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.AutoscaleFunctionMaxExecutionTime]: {
+ keyName: AutoscaleSetting.AutoscaleFunctionMaxExecutionTime,
+ description:
+ 'Maximum execution time (in second) allowed for an Autoscale Cloud Function that can' +
+ ' run in one cloud function invocation or multiple extended invocations.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.AutoscaleHandlerUrl]: {
+ keyName: AutoscaleSetting.AutoscaleHandlerUrl,
+ description:
+ 'The Autoscale handler (cloud function) URL as the communication endpoint between' +
+ 'Autoscale and device in the scaling group(s).',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.AssetStorageContainer]: {
+ keyName: AutoscaleSetting.AssetStorageContainer,
+ description: 'Asset storage name.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.AssetStorageDirectory]: {
+ keyName: AutoscaleSetting.AssetStorageDirectory,
+ description: 'Asset storage key prefix.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.ByolScalingGroupDesiredCapacity]: {
+ keyName: AutoscaleSetting.ByolScalingGroupDesiredCapacity,
+ description: 'BYOL Scaling group desired capacity.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.ByolScalingGroupMinSize]: {
+ keyName: AutoscaleSetting.ByolScalingGroupMinSize,
+ description: 'BYOL Scaling group min size.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.ByolScalingGroupMaxSize]: {
+ keyName: AutoscaleSetting.ByolScalingGroupMaxSize,
+ description: 'BYOL Scaling group max size.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.ByolScalingGroupName]: {
+ keyName: AutoscaleSetting.ByolScalingGroupName,
+ description: 'The name of the BYOL auto scaling group.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.CustomAssetContainer]: {
+ keyName: AutoscaleSetting.CustomAssetContainer,
+ description:
+ 'The asset storage name for some user custom resources, such as: custom configset, license files, etc.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.CustomAssetDirectory]: {
+ keyName: AutoscaleSetting.CustomAssetDirectory,
+ description:
+ 'The sub directory to the user custom resources under the custom-asset-container.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.EnableExternalElb]: {
+ keyName: AutoscaleSetting.EnableExternalElb,
+ description:
+ 'Toggle ON / OFF the external elastic load balancing for device in the external-facing Autoscale scaling group(s).',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.EnableHybridLicensing]: {
+ keyName: AutoscaleSetting.EnableHybridLicensing,
+ description: 'Toggle ON / OFF the hybrid licensing feature.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.EnableInternalElb]: {
+ keyName: AutoscaleSetting.EnableInternalElb,
+ description:
+ 'Toggle ON / OFF the internal elastic load balancing feature to allow traffic flow out' +
+ ' the device in the Autoscale scaling groups(s) into an internal load balancer.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.EnableNic2]: {
+ keyName: AutoscaleSetting.EnableNic2,
+ description:
+ 'Toggle ON / OFF the secondary eni creation on each device in the Autoscale scaling group(s).',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.EnableVmInfoCache]: {
+ keyName: AutoscaleSetting.EnableVmInfoCache,
+ description:
+ 'Toggle ON / OFF the vm info cache feature. It caches the ' +
+ 'vm info in db to reduce API calls to query a vm from the platform.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.HeartbeatDelayAllowance]: {
+ keyName: AutoscaleSetting.HeartbeatDelayAllowance,
+ description:
+ 'The maximum amount of time (in seconds) allowed for network latency of the Autoscale' +
+ ' device heartbeat arriving at the Autoscale handler.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.HeartbeatInterval]: {
+ keyName: AutoscaleSetting.HeartbeatInterval,
+ description:
+ 'The length of time (in seconds) that an Autoscale device waits between' +
+ ' sending heartbeat requests to the Autoscale handler.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.HeartbeatLossCount]: {
+ keyName: AutoscaleSetting.HeartbeatLossCount,
+ description:
+ 'Number of consecutively lost heartbeats.' +
+ ' When the Heartbeat Loss Count has been reached,' +
+ ' the device is deemed unhealthy and fail-over activities will commence.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.LicenseFileDirectory]: {
+ keyName: AutoscaleSetting.LicenseFileDirectory,
+ description: 'The sub directory for storing license files under the asset container.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.PrimaryElectionTimeout]: {
+ keyName: AutoscaleSetting.PrimaryElectionTimeout,
+ description: 'The maximum time (in seconds) to wait for a primary election to complete.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.PrimaryScalingGroupName]: {
+ keyName: AutoscaleSetting.PrimaryScalingGroupName,
+ description: 'The name of the primary auto scaling group.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.PaygScalingGroupDesiredCapacity]: {
+ keyName: AutoscaleSetting.PaygScalingGroupDesiredCapacity,
+ description: 'PAYG Scaling group desired capacity.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.PaygScalingGroupMinSize]: {
+ keyName: AutoscaleSetting.PaygScalingGroupMinSize,
+ description: 'PAYG Scaling group min size.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.PaygScalingGroupMaxSize]: {
+ keyName: AutoscaleSetting.PaygScalingGroupMaxSize,
+ description: 'PAYG Scaling group max size.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.PaygScalingGroupName]: {
+ keyName: AutoscaleSetting.PaygScalingGroupName,
+ description: 'The name of the PAYG auto scaling group.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.ResourceTagPrefix]: {
+ keyName: AutoscaleSetting.ResourceTagPrefix,
+ description:
+ 'Resource tag prefix. Used on any resource that supports tagging or labeling.' +
+ ' Such resource will be given a tag or label starting with this prefix.' +
+ ' Also used as the name of the logical group for Autoscale resources' +
+ ' in those cloud platforms which support such logical grouping.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.SyncRecoveryCount]: {
+ keyName: AutoscaleSetting.SyncRecoveryCount,
+ description:
+ 'The number (positive integer) of on-time heartbeat for a vm needs to send to ' +
+ ' recover from the unhealthy state. Unhealthy vm will be excluded from being' +
+ ' candidate of primary elections.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.TerminateUnhealthyVm]: {
+ keyName: AutoscaleSetting.TerminateUnhealthyVm,
+ description:
+ 'Toggle for unhealthy vm handling behaviours. Set to true to terminate unhealthy vm' +
+ ' or set to false to keep the unhealthy vm.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: true
+ },
+ [AutoscaleSetting.VpnBgpAsn]: {
+ keyName: AutoscaleSetting.VpnBgpAsn,
+ description: 'The BGP Autonomous System Number used with the VPN connections.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ },
+ [AutoscaleSetting.VmInfoCacheTime]: {
+ keyName: AutoscaleSetting.VmInfoCacheTime,
+ description: 'The vm info cache time in seconds.',
+ editable: true,
+ jsonEncoded: false,
+ booleanType: false
+ }
+};
diff --git a/core/azure/azure-cloud-function-proxy.ts b/core/azure/azure-cloud-function-proxy.ts
new file mode 100644
index 0000000..33d8638
--- /dev/null
+++ b/core/azure/azure-cloud-function-proxy.ts
@@ -0,0 +1,190 @@
+import { Context, HttpRequest } from '@azure/functions';
+import {
+ AutoscaleServiceRequest,
+ CloudFunctionProxy,
+ CloudFunctionResponseBody,
+ JSONable,
+ jsonStringifyReplacer,
+ LogLevel,
+ mapHttpMethod,
+ ReqHeaders,
+ ReqMethod
+} from '..';
+
+export interface AzureFunctionResponse {
+ status: number;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ body: any;
+}
+
+export interface LogItem {
+ level: LogLevel;
+ timestamp: number;
+ arguments?: unknown[];
+}
+
+export abstract class AzureCloudFunctionProxy extends CloudFunctionProxy<
+ TReq,
+ Context,
+ AzureFunctionResponse
+> {
+ request: TReq;
+ context: Context;
+ private messageQueue: LogItem[] = [];
+ log(message: string, level: LogLevel, ...others: unknown[]): void {
+ if (process.env.DEBUG_LOGGER_OUTPUT_QUEUE_ENABLED === 'true') {
+ this.enqueue(message, level, ...others);
+ return;
+ }
+ switch (level) {
+ case LogLevel.Debug:
+ this.context.log(message, ...others);
+ break;
+ case LogLevel.Error:
+ this.context.log.error(message, ...others);
+ break;
+ case LogLevel.Info:
+ this.context.log.info(message, ...others);
+ break;
+ case LogLevel.Warn:
+ this.context.log.warn(message, ...others);
+ break;
+ default:
+ this.context.log.error(message, ...others);
+ }
+ }
+
+ getRemainingExecutionTime(): Promise {
+ throw new Error(
+ 'Not supposed to call the AzureFunctionInvocationProxy.getRemainingExecutionTime()' +
+ ' method in this implementation.' +
+ ' Is it just a mistake?'
+ );
+ }
+
+ /**
+ * return a formatted Azure Function handler response
+ * @param {number} httpStatusCode http status code
+ * @param {CloudFunctionResponseBody} body response body
+ * @param {{}} headers response header
+ * @returns {AzureFunctionResponse} function response
+ */
+ formatResponse(
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ httpStatusCode: number,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ body: CloudFunctionResponseBody,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ headers: unknown
+ ): AzureFunctionResponse {
+ // NOTE: if enable queued log output, output log here
+ if (process.env.DEBUG_LOGGER_OUTPUT_QUEUE_ENABLED === 'true') {
+ const messages: unknown[] = [];
+ this.allLogs.forEach(log => {
+ messages.push(`[${log.level}]`);
+ messages.push(...log.arguments);
+ messages.push('\n');
+ });
+ this.context.log(...messages);
+ }
+ return null;
+ }
+
+ protected enqueue(message: string, level: LogLevel, ...args: unknown[]): void {
+ const item: LogItem = {
+ level: level,
+ timestamp: Date.now() + new Date().getTimezoneOffset() * 60000, // GMT time in ms
+ arguments: []
+ };
+ item.arguments = Array.from(args).map(arg => {
+ return JSON.stringify(arg, jsonStringifyReplacer);
+ });
+ item.arguments.unshift(message);
+ this.messageQueue.push(item);
+ }
+
+ get allLogs(): LogItem[] {
+ return this.messageQueue;
+ }
+}
+
+export class AzureFunctionServiceProviderProxy extends AzureCloudFunctionProxy {
+ getRequestAsString(): Promise {
+ return Promise.resolve((this.request && JSON.stringify(this.request)) || '');
+ }
+ getReqBody(): Promise {
+ return Promise.resolve(this.request || {});
+ }
+ getReqHeaders(): Promise {
+ return Promise.resolve({});
+ }
+ getReqMethod(): Promise {
+ return Promise.resolve(null);
+ }
+ getReqQueryParameters(): Promise<{ [name: string]: string }> {
+ return Promise.resolve({});
+ }
+}
+
+export class AzureFunctionHttpTriggerProxy extends AzureCloudFunctionProxy {
+ getReqBody(): Promise {
+ try {
+ if (this.context.req.body && typeof this.context.req.body === 'string') {
+ return JSON.parse(this.context.req.body as string);
+ } else if (this.context.req.body && typeof this.context.req.body === 'object') {
+ return Promise.resolve({ ...this.context.req.body });
+ } else {
+ return null;
+ }
+ } catch (error) {
+ return null;
+ }
+ }
+ getReqQueryParameters(): Promise<{ [name: string]: string }> {
+ return Promise.resolve(this.context.req.params);
+ }
+ getRequestAsString(): Promise {
+ return Promise.resolve(this.context.req && JSON.stringify(this.context.req));
+ }
+ getReqHeaders(): Promise {
+ // NOTE: header keys will be treated case-insensitive as per
+ // the RFC https://tools.ietf.org/html/rfc7540#section-8.1.2
+ const headers: ReqHeaders = (this.context.req.headers && {}) || null;
+ if (this.context.req.headers) {
+ Object.entries(this.context.req.headers).forEach(([k, v]) => {
+ headers[String(k).toLowerCase()] = v;
+ });
+ }
+ return Promise.resolve(headers);
+ }
+
+ getReqMethod(): Promise {
+ return Promise.resolve(mapHttpMethod(this.context.req.method));
+ }
+
+ /**
+ * return a formatted Azure Function handler response
+ * @param {number} httpStatusCode http status code
+ * @param {CloudFunctionResponseBody} body response body
+ * @param {{}} headers response header
+ * @returns {AzureFunctionResponse} function response
+ */
+ formatResponse(
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ httpStatusCode: number,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ body: CloudFunctionResponseBody,
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ headers: unknown
+ ): AzureFunctionResponse {
+ super.formatResponse(httpStatusCode, body, headers);
+ return {
+ status: httpStatusCode,
+ body: body
+ };
+ }
+}
+
+export type AzureFunctionInvocationProxy =
+ | AzureFunctionHttpTriggerProxy
+ | AzureFunctionServiceProviderProxy;
diff --git a/core/azure/azure-db-definitions.ts b/core/azure/azure-db-definitions.ts
new file mode 100644
index 0000000..cdd3f9b
--- /dev/null
+++ b/core/azure/azure-db-definitions.ts
@@ -0,0 +1,644 @@
+/* eslint-disable @typescript-eslint/naming-convention */
+import * as DBDef from '../db-definitions';
+
+// NOTE: Azure Cosmos DB Data modeling concepts
+// see: https://docs.microsoft.com/en-us/azure/cosmos-db/modeling-data
+// Cosmos DB is a schema-free type of database so the data type definitions have no effect on
+// items.
+// The types here are still given just for good readability.
+export const AzureTypeRefs: DBDef.TypeRefMap = new Map([
+ [DBDef.TypeRef.StringType, 'string'],
+ [DBDef.TypeRef.NumberType, 'number'],
+ [DBDef.TypeRef.BooleanType, 'boolean'],
+ [DBDef.TypeRef.PrimaryKey, 'hash'],
+ [DBDef.TypeRef.SecondaryKey, 'range']
+]);
+
+export interface CosmosDBQueryWhereClause {
+ name: string;
+ value: string;
+}
+
+export interface CosmosDBQueryResult {
+ result: T[];
+ query?: string;
+}
+
+// CosmosDB table has some useful meta properties added to each item
+// they are defined here below
+export interface CosmosDbTableMetaData {
+ id: string;
+ _rid: string;
+ _self: string;
+ _etag: string;
+ _attachments: string;
+ _ts: number;
+ [key: string]: string | number | boolean;
+}
+
+export const CosmosDbTableMetaDataAttributes = [
+ {
+ name: 'id',
+ attrType: DBDef.TypeRef.StringType,
+ isKey: false
+ },
+ {
+ name: '_attachments',
+ attrType: DBDef.TypeRef.StringType,
+ isKey: false
+ },
+ {
+ name: '_etag',
+ attrType: DBDef.TypeRef.StringType,
+ isKey: false
+ },
+ {
+ name: '_rid',
+ attrType: DBDef.TypeRef.StringType,
+ isKey: false
+ },
+ {
+ name: '_self',
+ attrType: DBDef.TypeRef.StringType,
+ isKey: false
+ },
+ {
+ name: '_ts',
+ attrType: DBDef.TypeRef.NumberType,
+ isKey: false
+ }
+];
+
+export class CosmosDBTypeConverter extends DBDef.TypeConverter {
+ valueToString(value: unknown): string {
+ return value as string;
+ }
+ valueToNumber(value: unknown): number {
+ return Number(value as string);
+ }
+ valueToBoolean(value: unknown): boolean {
+ return !!value;
+ }
+}
+
+export interface AzureAutoscaleDbItem extends DBDef.AutoscaleDbItem, CosmosDbTableMetaData {}
+
+export class AzureAutoscale
+ extends DBDef.Autoscale
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureAutoscaleDbItem {
+ const item: AzureAutoscaleDbItem = {
+ ...super.convertRecord(record),
+ id: this.typeConvert.valueToString(record.id),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.AutoscaleDbItem): AzureAutoscaleDbItem {
+ const item: AzureAutoscaleDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureAutoscaleDbItem): DBDef.AutoscaleDbItem {
+ const item: AzureAutoscaleDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzurePrimaryElectionDbItem
+ extends DBDef.PrimaryElectionDbItem,
+ CosmosDbTableMetaData {}
+export class AzurePrimaryElection
+ extends DBDef.PrimaryElection
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzurePrimaryElectionDbItem {
+ const item: AzurePrimaryElectionDbItem = {
+ ...super.convertRecord(record),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.PrimaryElectionDbItem): AzurePrimaryElectionDbItem {
+ const item: AzurePrimaryElectionDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzurePrimaryElectionDbItem): DBDef.PrimaryElectionDbItem {
+ const item: AzurePrimaryElectionDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureFortiAnalyzerDbItem
+ extends DBDef.FortiAnalyzerDbItem,
+ CosmosDbTableMetaData {}
+
+export class AzureFortiAnalyzer
+ extends DBDef.FortiAnalyzer
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureFortiAnalyzerDbItem {
+ const item: AzureFortiAnalyzerDbItem = {
+ ...super.convertRecord(record),
+ id: this.typeConvert.valueToString(record.id),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.FortiAnalyzerDbItem): AzureFortiAnalyzerDbItem {
+ const item: AzureFortiAnalyzerDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureFortiAnalyzerDbItem): DBDef.FortiAnalyzerDbItem {
+ const item: AzureFortiAnalyzerDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureSettingsDbItem extends DBDef.SettingsDbItem, CosmosDbTableMetaData {}
+
+export class AzureSettings
+ extends DBDef.Settings
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureSettingsDbItem {
+ const item: AzureSettingsDbItem = {
+ ...super.convertRecord(record),
+ id: this.typeConvert.valueToString(record.id),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.SettingsDbItem): AzureSettingsDbItem {
+ const item: AzureSettingsDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureSettingsDbItem): DBDef.SettingsDbItem {
+ const item: AzureSettingsDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureVmInfoCacheDbItem extends DBDef.VmInfoCacheDbItem, CosmosDbTableMetaData {}
+
+export class AzureVmInfoCache
+ extends DBDef.VmInfoCache
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureVmInfoCacheDbItem {
+ const item: AzureVmInfoCacheDbItem = {
+ ...super.convertRecord(record),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.VmInfoCacheDbItem): AzureVmInfoCacheDbItem {
+ const item: AzureVmInfoCacheDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureVmInfoCacheDbItem): DBDef.VmInfoCacheDbItem {
+ const item: AzureVmInfoCacheDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureLicenseStockDbItem extends DBDef.LicenseStockDbItem, CosmosDbTableMetaData {}
+
+export class AzureLicenseStock
+ extends DBDef.LicenseStock
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureLicenseStockDbItem {
+ const item: AzureLicenseStockDbItem = {
+ ...super.convertRecord(record),
+ id: this.typeConvert.valueToString(record.id),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.LicenseStockDbItem): AzureLicenseStockDbItem {
+ const item: AzureLicenseStockDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureLicenseStockDbItem): DBDef.LicenseStockDbItem {
+ const item: AzureLicenseStockDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureLicenseUsageDbItem extends DBDef.LicenseUsageDbItem, CosmosDbTableMetaData {}
+
+export class AzureLicenseUsage
+ extends DBDef.LicenseUsage
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureLicenseUsageDbItem {
+ const item: AzureLicenseUsageDbItem = {
+ ...super.convertRecord(record),
+ id: this.typeConvert.valueToString(record.id),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.LicenseUsageDbItem): AzureLicenseUsageDbItem {
+ const item: AzureLicenseUsageDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureLicenseUsageDbItem): DBDef.LicenseUsageDbItem {
+ const item: AzureLicenseUsageDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureCustomLogDbItem extends DBDef.CustomLogDbItem, CosmosDbTableMetaData {}
+
+export class AzureCustomLog
+ extends DBDef.CustomLog
+ implements DBDef.BidirectionalCastable
+{
+ constructor(namePrefix = '', nameSuffix = '') {
+ super(new CosmosDBTypeConverter(), namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ }
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureCustomLogDbItem {
+ const item: AzureCustomLogDbItem = {
+ ...super.convertRecord(record),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ return item;
+ }
+
+ downcast(record: DBDef.CustomLogDbItem): AzureCustomLogDbItem {
+ const item: AzureCustomLogDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureCustomLogDbItem): DBDef.CustomLogDbItem {
+ const item: AzureCustomLogDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
+
+export interface AzureApiRequestCacheDbItem
+ extends DBDef.ApiRequestCacheDbItem,
+ CosmosDbTableMetaData {}
+
+export class AzureApiRequestCache
+ extends DBDef.Table
+ implements DBDef.BidirectionalCastable
+{
+ static ownStaticAttributes: DBDef.Attribute[] = [
+ ...CosmosDbTableMetaDataAttributes, // NOTE: add additional Azure CosmosDB table meta data attributes
+ // NOTE: use the same attributes as the sibling class; attributes with the same key will
+ // override those in ...CosmosDbTableMetaDataAttributes
+ ...DBDef.ApiRequestCache.ownStaticAttributes
+ ];
+ private siblingClass: DBDef.ApiRequestCache;
+ constructor(namePrefix = '', nameSuffix = '') {
+ const converter = new CosmosDBTypeConverter();
+ super(converter, namePrefix, nameSuffix);
+ // NOTE: set the sibling class reference
+ this.siblingClass = new DBDef.ApiRequestCache(converter, namePrefix, nameSuffix);
+ // NOTE: use Azure CosmosDB type refs
+ this.alterAttributesUsingTypeReference(AzureTypeRefs);
+ // CAUTION: don't forget to set a correct name.
+ this.setName(this.siblingClass.name);
+ // CAUTION: don't forget to add attributes
+ AzureApiRequestCache.ownStaticAttributes.forEach(def => {
+ this.addAttribute(def);
+ });
+ }
+ /**
+ * @override override to provide additional meta data
+ */
+ convertRecord(record: DBDef.Record): AzureApiRequestCacheDbItem {
+ const item: AzureApiRequestCacheDbItem = {
+ ...this.siblingClass.convertRecord(record),
+ _attachments: this.typeConvert.valueToString(record._attachments),
+ _etag: this.typeConvert.valueToString(record._etag),
+ _rid: this.typeConvert.valueToString(record._rid),
+ _self: this.typeConvert.valueToString(record._self),
+ _ts: this.typeConvert.valueToNumber(record._ts)
+ };
+ // NOTE: the cacheTime property will use the value of _ts
+ item.cacheTime = item._ts;
+ return item;
+ }
+
+ downcast(record: DBDef.ApiRequestCacheDbItem): AzureApiRequestCacheDbItem {
+ const item: AzureApiRequestCacheDbItem = {
+ ...record,
+ // NOTE: id will automatically use the primary key value.
+ // If the record already has property 'id', the following assignment will overwrite
+ // the id value.
+ id: String(record[this.primaryKey.name]),
+ _attachments: undefined,
+ _etag: undefined,
+ _rid: undefined,
+ _self: undefined,
+ _ts: undefined
+ };
+ return item;
+ }
+
+ upcast(record: AzureApiRequestCacheDbItem): DBDef.ApiRequestCacheDbItem {
+ const item: AzureApiRequestCacheDbItem = {
+ ...record
+ };
+ delete item._attachments;
+ delete item._etag;
+ delete item._rid;
+ delete item._self;
+ delete item._ts;
+ // delete id only if id is not the primary key
+ if (this.primaryKey.name !== 'id') {
+ delete item.id;
+ }
+ return { ...item };
+ }
+}
diff --git a/core/azure/azure-fortianalyzer-integration-service.ts b/core/azure/azure-fortianalyzer-integration-service.ts
new file mode 100644
index 0000000..cc768be
--- /dev/null
+++ b/core/azure/azure-fortianalyzer-integration-service.ts
@@ -0,0 +1,96 @@
+import { Context } from '@azure/functions';
+import { AutoscaleServiceProvider, AutoscaleServiceRequest, JSONable, ReqType } from '..';
+import {
+ FortiGateAutoscaleServiceRequestSource,
+ FortiGateAutoscaleServiceType
+} from '../fortigate-autoscale';
+import {
+ AzureFortiGateAutoscale,
+ AzureFunctionDef,
+ AzureFunctionServiceProviderProxy,
+ AzurePlatformAdapter
+} from '.';
+
+export class AzureFortiGateAutoscaleServiceProvider
+ implements AutoscaleServiceProvider
+{
+ constructor(readonly autoscale: AzureFortiGateAutoscale) {
+ this.autoscale = autoscale;
+ }
+ startAutoscale(): Promise {
+ this.autoscale.proxy.logAsWarning('[startAutoscale] Method not implemented.');
+ return Promise.resolve(true);
+ }
+ stopAutoscale(): Promise {
+ this.autoscale.proxy.logAsWarning('[stopAutoscale] Method not implemented.');
+ return Promise.resolve(true);
+ }
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ saveAutoscaleSettings(props: { [key: string]: string }): Promise {
+ this.autoscale.proxy.logAsWarning('[SaveAutoscaleSettings] Method not implemented.');
+ return Promise.resolve(true);
+ }
+ get proxy(): AzureFunctionServiceProviderProxy {
+ return this.autoscale.proxy as AzureFunctionServiceProviderProxy;
+ }
+ get platform(): AzurePlatformAdapter {
+ return this.autoscale.platform;
+ }
+ async handleServiceRequest(request: AutoscaleServiceRequest): Promise {
+ this.proxy.logAsInfo('calling handleServiceRequest');
+ try {
+ // Verify the incoming request.
+ // request url must be contained in the defined function name array: serviceFuncUrl
+ const allowedServiceEndpointsList: string[] = [AzureFunctionDef.FazAuthScheduler.name];
+ const functionName = this.proxy.context.executionContext.functionName;
+ if (!allowedServiceEndpointsList.includes(functionName)) {
+ this.proxy.logAsWarning(
+ 'Unauthorized source url.',
+ `request function name: ${JSON.stringify(functionName)}`,
+ `request: ${request}`
+ );
+ this.proxy.logAsInfo('called handleServiceRequest');
+ }
+ // req type must be ReqType.ServiceProviderRequest
+ const reqType: ReqType = await this.platform.getRequestType();
+ if (reqType !== ReqType.ServiceProviderRequest) {
+ this.proxy.logAsWarning(
+ 'Invalid service provider request.',
+ `request type: ${reqType}`,
+ `request: ${request}`
+ );
+ this.proxy.logAsInfo('called handleServiceRequest');
+ return;
+ }
+ // request body must contain key: 'source' with value: 'fortinet.autoscale'
+ if (request.source !== FortiGateAutoscaleServiceRequestSource.FortiGateAutoscale) {
+ this.proxy.logAsWarning(
+ 'Invalid service provider source.',
+ `request source: ${request.source}`,
+ `request: ${request}`
+ );
+ this.proxy.logAsInfo('called handleServiceRequest');
+ }
+ // service type must be present in request
+ if (!request.serviceType) {
+ this.proxy.logAsWarning(
+ 'Invalid service provider request type.',
+ `request source: ${request.serviceType}`,
+ `request: ${request}`
+ );
+ this.proxy.logAsInfo('called handleServiceRequest');
+ }
+ switch (request.serviceType) {
+ case FortiGateAutoscaleServiceType.TriggerFazDeviceAuth:
+ await this.autoscale.init();
+ await this.autoscale.triggerFazDeviceAuth();
+ break;
+ default:
+ throw new Error(`Unsupported service type: [${request.serviceType}]`);
+ }
+ } catch (error) {
+ this.proxy.logForError('Handle service request error.', error);
+ this.proxy.logAsInfo('called handleServiceRequest');
+ }
+ }
+}
diff --git a/core/azure/azure-fortigate-autoscale-settings.ts b/core/azure/azure-fortigate-autoscale-settings.ts
new file mode 100644
index 0000000..1fe50e1
--- /dev/null
+++ b/core/azure/azure-fortigate-autoscale-settings.ts
@@ -0,0 +1,23 @@
+import { SettingItemDictionary, SettingItemReference } from '..';
+import {
+ FortiGateAutoscaleSetting,
+ FortiGateAutoscaleSettingItemDictionary
+} from '../fortigate-autoscale';
+// NOTE: every key must start with the 'Azure' prefix but the values do not need the prefix
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export const AzureFortiGateAutoscaleSetting: SettingItemReference = {
+ ...FortiGateAutoscaleSetting,
+ AzureFortiGateAutoscaleSettingSaved: 'fortigate-autoscale-setting-saved'
+};
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export const AzureFortiGateAutoscaleSettingItemDictionary: SettingItemDictionary = {
+ ...FortiGateAutoscaleSettingItemDictionary,
+ [AzureFortiGateAutoscaleSetting.AzureFortiGateAutoscaleSettingSaved]: {
+ keyName: AzureFortiGateAutoscaleSetting.AzureFortiGateAutoscaleSettingSaved,
+ description: 'The flag whether FortiGate Autoscale settings are saved in db or not.',
+ editable: false,
+ jsonEncoded: false,
+ booleanType: true
+ }
+};
diff --git a/core/azure/azure-fortigate-autoscale.ts b/core/azure/azure-fortigate-autoscale.ts
new file mode 100644
index 0000000..d60626f
--- /dev/null
+++ b/core/azure/azure-fortigate-autoscale.ts
@@ -0,0 +1,129 @@
+import { Context } from '@azure/functions';
+import {
+ AzureFortiGateAutoscaleSetting,
+ AzureFortiGateBootstrapStrategy,
+ AzureFunctionHttpTriggerProxy,
+ AzureFunctionInvocable,
+ AzureHybridScalingGroupStrategy,
+ AzurePlatformAdapter,
+ AzureRoutingEgressTrafficViaPrimaryVmStrategy,
+ AzureTaggingAutoscaleVmStrategy
+} from '.';
+import {
+ AutoscaleEnvironment,
+ CloudFunctionInvocationPayload,
+ CloudFunctionInvocationTimeOutError,
+ CloudFunctionProxyAdapter,
+ ConstantIntervalHeartbeatSyncStrategy,
+ FazDeviceAuthorization,
+ JSONable,
+ ReusableLicensingStrategy,
+ WeightedScorePreferredGroupPrimaryElection
+} from '..';
+import {
+ FazReactiveAuthorizationStrategy,
+ FortiGateAutoscale,
+ FortiGateAutoscaleFunctionInvocationHandler
+} from '../fortigate-autoscale';
+
+export class AzureFortiGateAutoscale extends FortiGateAutoscale<
+ TReq,
+ TContext,
+ TRes
+> {
+ constructor(
+ readonly platform: AzurePlatformAdapter,
+ readonly env: AutoscaleEnvironment,
+ readonly proxy: CloudFunctionProxyAdapter
+ ) {
+ super();
+ // TODO: to be implemented
+ // use noop scaling group strategy
+ this.setScalingGroupStrategy(new AzureHybridScalingGroupStrategy(platform, proxy));
+        // use preferred group primary election for Hybrid licensing model
+ this.setPrimaryElectionStrategy(
+ new WeightedScorePreferredGroupPrimaryElection(platform, proxy)
+ );
+ // use a constant interval heartbeat sync strategy
+ this.setHeartbeatSyncStrategy(new ConstantIntervalHeartbeatSyncStrategy(platform, proxy));
+ // TODO: implement the Azure tagging feature
+ // use Azure resource tagging strategy
+ this.setTaggingAutoscaleVmStrategy(new AzureTaggingAutoscaleVmStrategy(platform, proxy));
+ // use FortiGate bootstrap configuration strategy
+ this.setBootstrapConfigurationStrategy(
+ new AzureFortiGateBootstrapStrategy(platform, proxy, env)
+ );
+        // use the Reusable licensing strategy
+ this.setLicensingStrategy(new ReusableLicensingStrategy(platform, proxy));
+ // TODO: need to figure out how Azure VNet route egress traffic
+ // use the routing egress traffic via primary vm strategy
+ this.setRoutingEgressTrafficStrategy(
+ new AzureRoutingEgressTrafficViaPrimaryVmStrategy(platform, proxy, env)
+ );
+ // use the reactive authorization strategy for FAZ integration
+ this.setFazIntegrationStrategy(new FazReactiveAuthorizationStrategy(platform, proxy));
+ }
+}
+
+export class AzureFortiGateAutoscaleFazAuthHandler extends FortiGateAutoscaleFunctionInvocationHandler {
+ autoscale: AzureFortiGateAutoscale;
+ constructor(autoscale: AzureFortiGateAutoscale) {
+ super();
+ this.autoscale = autoscale;
+ }
+ get proxy(): AzureFunctionHttpTriggerProxy {
+ return this.autoscale.proxy as AzureFunctionHttpTriggerProxy;
+ }
+
+ get platform(): AzurePlatformAdapter {
+ return this.autoscale.platform;
+ }
+
+ async executeInvocable(
+ payload: CloudFunctionInvocationPayload,
+ invocable: string
+ ): Promise {
+ const payloadData: JSONable = JSON.parse(payload.stringifiedData);
+ const settings = await this.platform.getSettings();
+ if (invocable === AzureFunctionInvocable.TriggerFazDeviceAuth) {
+ const fazIpSettingItem = settings.get(AzureFortiGateAutoscaleSetting.FortiAnalyzerIp);
+ if (!fazIpSettingItem.value) {
+ throw new CloudFunctionInvocationTimeOutError(
+ 'FortiAnalyzer IP address not specified.'
+ );
+ }
+ const deviceAuthorization: FazDeviceAuthorization = {
+ vmId: payloadData.vmId as string,
+ privateIp: payloadData.privateIp && String(payloadData.privateIp),
+ publicIp: payloadData.publicIp && String(payloadData.publicIp)
+ };
+
+ // extract the autoscale admin user and faz info
+ const username: string = await this.platform.getSecretFromKeyVault(
+ 'faz-autoscale-admin-username'
+ );
+ const password: string = await this.platform.getSecretFromKeyVault(
+ 'faz-autoscale-admin-password'
+ );
+ const fazIp: string = fazIpSettingItem.value;
+ const fazPort = '443';
+
+ await this.autoscale.fazIntegrationStrategy
+ .processAuthorizationRequest(
+ deviceAuthorization,
+ fazIp,
+ fazPort,
+ username,
+ password
+ )
+ .catch(e => {
+ const error: CloudFunctionInvocationTimeOutError = e;
+ error.extendExecution = false;
+ throw error;
+ });
+ return;
+ }
+ // otherwise, no matching invocable, throw error
+ throw new CloudFunctionInvocationTimeOutError(`No matching invocable for: ${invocable}`);
+ }
+}
diff --git a/core/azure/azure-fortigate-bootstrap-config-strategy.ts b/core/azure/azure-fortigate-bootstrap-config-strategy.ts
new file mode 100644
index 0000000..7a9f6bb
--- /dev/null
+++ b/core/azure/azure-fortigate-bootstrap-config-strategy.ts
@@ -0,0 +1,40 @@
+import { AutoscaleEnvironment, CloudFunctionProxyAdapter } from '..';
+import { FortiGateBootstrapConfigStrategy } from '../fortigate-autoscale';
+import { AzurePlatformAdapter } from '.';
+
+export class AzureFortiGateBootstrapStrategy extends FortiGateBootstrapConfigStrategy {
+ constructor(
+ readonly platform: AzurePlatformAdapter,
+ readonly proxy: CloudFunctionProxyAdapter,
+ readonly env: AutoscaleEnvironment
+ ) {
+ super();
+ }
+ /**
+ *
+     * @override for loading bootstrap config with Azure specific additional configsets
+     * @returns {Promise<string>} configset content
+ */
+ async loadConfig(): Promise {
+ let baseConfig = await super.loadConfig();
+ // load azure only configset
+ baseConfig += await this.loadExtraPorts();
+ return baseConfig;
+ }
+ /**
+ *
+ * load the configset content for extra ports deployment
+     * @returns {Promise<string>} configset content
+ */
+ async loadExtraPorts(): Promise {
+ this.settings = this.settings || (await this.platform.getSettings());
+ try {
+ return await this.platform.loadConfigSet('extraports');
+ } catch (error) {
+ this.proxy.logAsWarning("extraports configset doesn't exist in the assets storage.");
+            // NOTE: even when the extraports configset cannot be loaded, return an empty string instead
+ // of throwing errors
+ return '';
+ }
+ }
+}
diff --git a/core/azure/azure-function-definitions.ts b/core/azure/azure-function-definitions.ts
new file mode 100644
index 0000000..8a4d57b
--- /dev/null
+++ b/core/azure/azure-function-definitions.ts
@@ -0,0 +1,30 @@
+/* eslint-disable @typescript-eslint/naming-convention */
+interface AzureFunctionDefinition {
+ name: string;
+ path: string;
+}
+
+export const ByolLicense: AzureFunctionDefinition = {
+ name: 'byol-license',
+ path: '/api/byol-license'
+};
+
+export const CustomLog: AzureFunctionDefinition = {
+ name: 'custom-log',
+ path: '/api/custom-log'
+};
+
+export const FazAuthScheduler: AzureFunctionDefinition = {
+ name: 'faz-auth-scheduler',
+ path: '/api/faz-auth-scheduler'
+};
+
+export const FortiGateAutoscaleHandler: AzureFunctionDefinition = {
+ name: 'fgt-as-handler',
+ path: '/api/fgt-as-handler'
+};
+
+export const FazAuthHandler: AzureFunctionDefinition = {
+ name: 'faz-auth-handler',
+ path: '/api/faz-auth-handler'
+};
diff --git a/core/azure/azure-function-invocable.ts b/core/azure/azure-function-invocable.ts
new file mode 100644
index 0000000..769a80c
--- /dev/null
+++ b/core/azure/azure-function-invocable.ts
@@ -0,0 +1,11 @@
+import { CloudFunctionInvocationPayload, CloudFunctionInvocationTimeOutError } from '..';
+import { FortiGateAutoscaleFunctionInvocable } from '../fortigate-autoscale';
+
+export type AzureFunctionInvocationPayload = CloudFunctionInvocationPayload;
+
+export type AzureFunctionInvocableExecutionTimeOutError = CloudFunctionInvocationTimeOutError;
+
+// eslint-disable-next-line @typescript-eslint/naming-convention
+export const AzureFunctionInvocable = {
+ ...FortiGateAutoscaleFunctionInvocable
+};
diff --git a/core/azure/azure-hybrid-scaling-group-strategy.ts b/core/azure/azure-hybrid-scaling-group-strategy.ts
new file mode 100644
index 0000000..f4d0bd4
--- /dev/null
+++ b/core/azure/azure-hybrid-scaling-group-strategy.ts
@@ -0,0 +1,55 @@
+import { CloudFunctionProxyAdapter, ScalingGroupStrategy } from '..';
+import { AzurePlatformAdapter } from '.';
+
+export class AzureHybridScalingGroupStrategy implements ScalingGroupStrategy {
+ platform: AzurePlatformAdapter;
+ proxy: CloudFunctionProxyAdapter;
+ constructor(platform: AzurePlatformAdapter, proxy: CloudFunctionProxyAdapter) {
+ this.platform = platform;
+ this.proxy = proxy;
+ }
+ onLaunchingVm(): Promise {
+ this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.onLaunchingVm');
+ this.proxy.logAsInfo('no operation needed in this phase.');
+ this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.onLaunchingVm');
+ return Promise.resolve('');
+ }
+ onLaunchedVm(): Promise {
+ this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.onLaunchedVm');
+ this.proxy.logAsInfo('no operation needed in this phase.');
+ this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.onLaunchedVm');
+ return Promise.resolve('');
+ }
+ onVmNotLaunched(): Promise {
+ this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.onVmNotLaunched');
+ this.proxy.logAsInfo('no operation needed in this phase.');
+ this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.onVmNotLaunched');
+ return Promise.resolve('');
+ }
+ onTerminatingVm(): Promise {
+ this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.onTerminatingVm');
+ this.proxy.logAsInfo('no operation needed in this phase.');
+ this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.onTerminatingVm');
+ return Promise.resolve('');
+ }
+ onTerminatedVm(): Promise {
+ this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.onTerminatedVm');
+ this.proxy.logAsInfo('no operation needed in this phase.');
+ this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.onTerminatedVm');
+ return Promise.resolve('');
+ }
+ completeLaunching(success = true): Promise {
+        this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.completeLaunching');
+        this.proxy.logAsInfo(`value passed to parameter: success: ${success}.`);
+        this.proxy.logAsInfo('no operation needed in this phase.');
+        this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.completeLaunching');
+ return Promise.resolve('');
+ }
+ completeTerminating(success = true): Promise {
+ this.proxy.logAsInfo('calling AzureHybridScalingGroupStrategy.completeTerminating');
+ this.proxy.logAsInfo(`value passed to parameter: success: ${success}.`);
+ this.proxy.logAsInfo('no operation needed in this phase.');
+ this.proxy.logAsInfo('called AzureHybridScalingGroupStrategy.completeTerminating');
+ return Promise.resolve('');
+ }
+}
diff --git a/core/azure/azure-platform-adaptee.ts b/core/azure/azure-platform-adaptee.ts
new file mode 100644
index 0000000..e5639c1
--- /dev/null
+++ b/core/azure/azure-platform-adaptee.ts
@@ -0,0 +1,967 @@
+import { ComputeManagementClient } from '@azure/arm-compute';
+import { VirtualMachineScaleSetVM } from '@azure/arm-compute/esm/models';
+import { NetworkManagementClient } from '@azure/arm-network';
+import { NetworkInterface } from '@azure/arm-network/esm/models';
+import {
+ CosmosClient,
+ CosmosClientOptions,
+ Database,
+ FeedResponse,
+ RequestOptions,
+ SqlParameter,
+ SqlQuerySpec
+} from '@azure/cosmos';
+import { ClientSecretCredential } from '@azure/identity';
+import { SecretClient } from '@azure/keyvault-secrets';
+import * as msRestNodeAuth from '@azure/ms-rest-nodeauth';
+import { BlobServiceClient, StorageSharedKeyCredential } from '@azure/storage-blob';
+import * as DBDef from '../db-definitions';
+import axios, { AxiosRequestConfig, AxiosResponse } from 'axios';
+import fs from 'fs';
+import * as HttpStatusCodes from 'http-status-codes';
+import path from 'path';
+import {
+ AzureApiRequestCache,
+ AzureFortiGateAutoscaleSetting,
+ AzureSettings,
+ AzureSettingsDbItem,
+ CosmosDBQueryResult,
+ CosmosDBQueryWhereClause,
+ CosmosDbTableMetaData
+} from '.';
+import {
+ Blob,
+ jsonParseReviver,
+ jsonStringifyReplacer,
+ PlatformAdaptee,
+ SettingItem,
+ Settings
+} from '..';
+
+// the no-shadow rule errored in the next line may be just a false alarm
+// eslint-disable-next-line no-shadow
+export enum RequiredEnvVars {
+ AUTOSCALE_DB_ACCOUNT = 'AUTOSCALE_DB_ACCOUNT',
+ AUTOSCALE_DB_NAME = 'AUTOSCALE_DB_NAME',
+ AUTOSCALE_DB_PRIMARY_KEY = 'AUTOSCALE_DB_PRIMARY_KEY',
+ AUTOSCALE_KEY_VAULT_NAME = 'AUTOSCALE_KEY_VAULT_NAME',
+ AZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT',
+ AZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY',
+ CLIENT_ID = 'CLIENT_ID',
+ CLIENT_SECRET = 'CLIENT_SECRET',
+ RESOURCE_GROUP = 'RESOURCE_GROUP',
+ SUBSCRIPTION_ID = 'SUBSCRIPTION_ID',
+ TENANT_ID = 'TENANT_ID'
+}
+
+export interface ApiCacheRequest {
+ api: string;
+ parameters: string[];
+ ttl?: number;
+}
+
+export interface ApiCacheResult {
+ id?: string;
+ api?: string;
+ parameters?: string[];
+ stringifiedData: string;
+ ttl: number;
+ cacheTime?: number;
+ expired?: boolean;
+}
+
+export interface ApiCache {
+ result: T;
+ hitCache: boolean;
+ cacheTime: number;
+ ttl: number;
+}
+
+/**
+ * Api Cache options
+ * @enum
+ */
+// the no-shadow rule errored in the next line may be just a false alarm
+// eslint-disable-next-line no-shadow
+export enum ApiCacheOption {
+ /**
+ * @member {string} ReadApiFirst always request data from api then save data to cache.
+ */
+ ReadApiFirst = 'ReadApiFirst',
+ /**
+ * @member {string} ReadApiOnly always request data from api but never save data to cache.
+ */
+ ReadApiOnly = 'ReadApiOnly',
+ /**
+ * @member {string} ReadCacheAndDelete read cache, delete the cache. not request data from api
+ */
+ ReadCacheAndDelete = 'ReadCacheAndDelete',
+ /**
+ * @member {string} ReadCacheFirst read cache first. if no cached data, request data from api
+ * then save data to cache.
+ */
+ ReadCacheFirst = 'ReadCacheFirst',
+ /**
+ * @member {string} ReadCacheOnly only read data from cache. not request data from api
+ */
+ ReadCacheOnly = 'ReadCacheOnly'
+}
+
+const TTLS = {
+ listInstances: 600,
+ describeInstance: 600,
+ listNetworkInterfaces: 600
+};
+
+export class AzurePlatformAdaptee implements PlatformAdaptee {
+ protected autoscaleDBRef: Database;
+ protected azureCompute: ComputeManagementClient;
+ protected azureCosmosDB: CosmosClient;
+ protected azureKeyVault: SecretClient;
+ protected azureNetwork: NetworkManagementClient;
+ protected azureStorage: BlobServiceClient;
+ protected settings: Settings;
+ /**
+ * The following process.env are required.
+ * process.env.AUTOSCALE_DB_ACCOUNT: the CosmosDB account name
+ * process.env.AUTOSCALE_DB_NAME: the Autoscale db name.
+ * process.env.CLIENT_ID: the App registration (service principal) app client_id.
+ * process.env.CLIENT_SECRET: the App registration (service principal) app client_secret.
+ * process.env.TENANT_ID: the tenant containing the App registration (service principal) app.
+ */
+ constructor() {
+ // validation
+ const missingEnvVars = Object.keys({ ...RequiredEnvVars }).filter(key => !process.env[key]);
+ if (missingEnvVars.length > 0) {
+ throw new Error(
+ `Missing the following environment variables: ${missingEnvVars.join()}.`
+ );
+ }
+ }
+ /**
+ * Class instance initiation. The following process.env are required.
+ * process.env.AUTOSCALE_DB_ACCOUNT: the CosmosDB account name
+ * process.env.AUTOSCALE_DB_NAME: the Autoscale db name.
+ * process.env.CLIENT_ID: the App registration (service principal) app client_id.
+ * process.env.CLIENT_SECRET: the App registration (service principal) app client_secret.
+ * process.env.TENANT_ID: the tenant containing the App registration (service principal) app.
+     * @returns {Promise<void>} void
+ */
+ async init(): Promise {
+ const cosmosClientOptions: CosmosClientOptions = {
+ endpoint: `https://${process.env.AUTOSCALE_DB_ACCOUNT}.documents.azure.com/`,
+ key: process.env.AUTOSCALE_DB_PRIMARY_KEY
+ };
+ this.azureCosmosDB = new CosmosClient(cosmosClientOptions);
+ this.autoscaleDBRef = this.azureCosmosDB.database(process.env.AUTOSCALE_DB_NAME);
+ const creds = await msRestNodeAuth.loginWithServicePrincipalSecret(
+ process.env.CLIENT_ID,
+ process.env.CLIENT_SECRET,
+ process.env.TENANT_ID
+ );
+ this.azureCompute = new ComputeManagementClient(creds, process.env.SUBSCRIPTION_ID);
+ this.azureNetwork = new NetworkManagementClient(creds, process.env.SUBSCRIPTION_ID);
+ this.azureStorage = new BlobServiceClient(
+ `https://${process.env.AZURE_STORAGE_ACCOUNT}.blob.core.windows.net`,
+ new StorageSharedKeyCredential(
+ process.env.AZURE_STORAGE_ACCOUNT,
+ process.env.AZURE_STORAGE_ACCESS_KEY
+ )
+ );
+ this.azureKeyVault = new SecretClient(
+ `https://${process.env.AUTOSCALE_KEY_VAULT_NAME}.vault.azure.net/`,
+ new ClientSecretCredential(
+ process.env.TENANT_ID,
+ process.env.CLIENT_ID,
+ process.env.CLIENT_SECRET
+ )
+ );
+ }
+
+ async reloadSettings(invalidateCache: boolean): Promise {
+ const table = new AzureSettings();
+ const queryResult: CosmosDBQueryResult =
+ await this.listItemFromDb(table);
+ const res = queryResult.result || [];
+ if (invalidateCache) {
+ this.settings = null;
+ }
+ const records: Map = new Map();
+ res.forEach(rec => records.set(rec.settingKey, rec));
+ const settings: Settings = new Map();
+ Object.values(AzureFortiGateAutoscaleSetting).forEach(value => {
+ if (records.has(value)) {
+ const record = records.get(value);
+ const settingItem = new SettingItem(
+ record.settingKey,
+ record.settingValue,
+ record.description,
+ record.editable,
+ record.jsonEncoded
+ );
+ settings.set(value, settingItem);
+ }
+ });
+ return settings;
+ }
+
+ async loadSettings(): Promise {
+ if (this.settings) {
+ return this.settings;
+ }
+ const data = await this.reloadSettings(false);
+ this.settings = data;
+ return this.settings;
+ }
+
+ /**
+ * get a single item.
+ * @param {Table} table the instance of Table to delete the item.
+ * T is the db item type of the given table.
+ * @param {DBDef.KeyValue[]} partitionKeys the partition keys (primary key)
+ * of the table
+     * @returns {Promise<T>} the db item of type T
+ */
+ async getItemFromDb(table: DBDef.Table, partitionKeys: DBDef.KeyValue[]): Promise {
+ const primaryKey: DBDef.KeyValue = partitionKeys[0];
+ try {
+ const itemResponse = await this.autoscaleDBRef
+ .container(table.name)
+ // CAUTION: the partition key must be provided in order to get the item.
+ // the partition key must match the same value of the item in the container.
+ .item(primaryKey.value, primaryKey.value)
+ .read();
+ if (itemResponse.statusCode === HttpStatusCodes.OK) {
+ return table.convertRecord({ ...itemResponse.resource });
+ } else {
+ return null;
+ }
+ } catch (error) {
+ if (error.code === HttpStatusCodes.NOT_FOUND) {
+ return null;
+ } else {
+ throw new DBDef.DbReadError(
+ DBDef.DbErrorCode.UnexpectedResponse,
+ JSON.stringify(error)
+ );
+ }
+ }
+ }
+
+ /**
+ * Scan and list all or some record from a given db table
+ * @param {Table} table the instance of Table to list the item.
+ * @param {CosmosDBQueryWhereClause[]} listClause (optional) a filter for listing the records
+ * @param {number} limit (optional) number or records to return
+ * @returns {Promise} CosmosDBQueryResult object with an array of db record
+ * @see https://docs.microsoft.com/en-us/azure/cosmos-db/sql-query-select
+ */
+ async listItemFromDb(
+ table: DBDef.Table,
+ listClause?: CosmosDBQueryWhereClause[],
+ limit?: number
+ ): Promise> {
+ let topClause = '';
+ if (limit && limit > 0) {
+ topClause = ` TOP ${limit}`;
+ }
+ const querySpec: SqlQuerySpec = {
+ query: `SELECT${topClause} * FROM ${table.name} t`
+ };
+ if (listClause && listClause.length > 0) {
+ querySpec.query = `${querySpec.query} WHERE`;
+ querySpec.parameters = listClause.map(clause => {
+ querySpec.query = `${querySpec.query} t.${clause.name} = @${clause.name} AND`;
+ return {
+ name: `@${clause.name}`,
+ value: clause.value
+ } as SqlParameter;
+ });
+ // to remove the last ' AND'
+ querySpec.query = querySpec.query.substr(0, querySpec.query.length - 4);
+ }
+ const queryResult: CosmosDBQueryResult = {
+ query: querySpec.query,
+ result: null
+ };
+ const feeds: FeedResponse = await this.autoscaleDBRef
+ .container(table.name)
+ .items.query(querySpec)
+ .fetchAll();
+ queryResult.result = feeds.resources;
+ return queryResult;
+ }
+ /**
+ * save an item to db. When the optional parameter 'dataIntegrityCheck' is provided, it will
+ * perform a data consistency checking before saving.
+ * The function compares each property of the item against the existing record
+ * with the same primary key in the db table.
+ * It saves the item only when one of the following conditions is met:
+ * condition 1: if parameter dataIntegrityCheck is passed boolean true, it will
+ * only compare the _etag
+ * condition 2: if parameter dataIntegrityCheck is passed a check function that accepts
+ * an input of type T, it will
+ * strictly compare each defined (including null, false and empty value) property
+ * @param {Table} table the instance of Table to save the item.
+ * @param {T} item the item to save
+ * @param {DBDef.SaveCondition} condition save condition
+ * @param {boolean| function} dataIntegrityCheck (optional) ensure data integrity to prevent
+ * saving outdated data.
+     * @returns {Promise<T>} a promise of item of type T
+ */
+ async saveItemToDb(
+ table: DBDef.Table,
+ item: T,
+ condition: DBDef.SaveCondition,
+ dataIntegrityCheck:
+ | boolean
+ | ((dbItemSnapshot: T) => Promise<{
+ result: boolean;
+ errorMessage: string;
+ }>) = true
+ ): Promise {
+ // CAUTION: validate the db input (non meta data)
+ table.validateInput(item);
+ // read the item
+ const itemSnapshot = await this.getItemFromDb(table, [
+ {
+ key: table.primaryKey.name,
+ value: item.id
+ }
+ ]);
+
+ let options: RequestOptions;
+
+ // if date with the same primary key already exists in the db table
+ if (itemSnapshot) {
+ // if a function is provided as dataIntegrityCheck, run the checker function
+ if (typeof dataIntegrityCheck === 'function') {
+ const checkerResult = await dataIntegrityCheck(itemSnapshot);
+ if (!checkerResult.result) {
+ throw new DBDef.DbSaveError(
+ DBDef.DbErrorCode.InconsistentData,
+ `Data integrityCheck failed. ${checkerResult.errorMessage || ''}`
+ );
+ }
+ }
+
+ // NOTE: if dataIntegrityCheck, enforces this access condition
+ options = dataIntegrityCheck && {
+ accessCondition: {
+ type: 'IfMatch',
+ condition: itemSnapshot._etag
+ }
+ };
+ }
+ // update only but no record found
+ if (condition === DBDef.SaveCondition.UpdateOnly && !itemSnapshot) {
+ throw new DBDef.DbSaveError(
+ DBDef.DbErrorCode.NotFound,
+ `Unable to update the item (id: ${item.id}).` +
+ ` The item not exists in the table (name: ${table.name}).`
+ );
+ }
+ // insert only but record found
+ else if (condition === DBDef.SaveCondition.InsertOnly && itemSnapshot) {
+ throw new DBDef.DbSaveError(
+ DBDef.DbErrorCode.KeyConflict,
+ `Unable to insert the item (id: ${item.id}).` +
+ ` The item already exists in the table (name: ${table.name}).`
+ );
+ }
+        // TODO: judging from the logic above, this condition is probably always false.
+        // Consider removing this block?
+ if (
+ dataIntegrityCheck &&
+ itemSnapshot &&
+ item[table.primaryKey.name] !== itemSnapshot[table.primaryKey.name]
+ ) {
+ throw new DBDef.DbSaveError(
+ DBDef.DbErrorCode.InconsistentData,
+ 'Inconsistent data.' +
+ ' Primary key values not match.' +
+ 'Cannot save item back into db due to' +
+ ' the restriction parameter dataIntegrityCheck is on.'
+ );
+ }
+ // ASSERT: input validation and data consistency checking have passed.
+ // db item meta data properties except for the 'id' do not need to be present so they
+ // will be removed from the object
+ const saveItem = { ...item };
+ // CAUTION: id accepts non-empty string value
+ // will try to set the id when present in the item,
+ // otherwise, will always set id to the same value as primary key
+ saveItem.id =
+ ((item.id || Number(item.id) === 0) && item.id) || String(item[table.primaryKey.name]);
+ delete saveItem._attachments;
+ delete saveItem._etag;
+ delete saveItem._rid;
+ delete saveItem._self;
+ delete saveItem._ts;
+
+ // update or insert
+ const result = await this.autoscaleDBRef
+ .container(table.name)
+ .items.upsert(saveItem, options);
+ if (
+ result.statusCode === HttpStatusCodes.OK ||
+ result.statusCode === HttpStatusCodes.CREATED
+ ) {
+ if (!result.resource) {
+ throw new DBDef.DbSaveError(
+ DBDef.DbErrorCode.UnexpectedResponse,
+ "Upsert doesn't return expected data. see the detailed upsert " +
+ `result:${JSON.stringify(result)}`
+ );
+ }
+ return table.convertRecord(result.resource);
+ } else {
+ throw new DBDef.DbSaveError(
+ DBDef.DbErrorCode.UnexpectedResponse,
+ 'Saving item unsuccessfull. SDK returned unexpected response with ' +
+ ` httpStatusCode: ${result.statusCode}.`
+ );
+ }
+ }
+ /**
+ * Delete a given item from the db
+ * @param {Table} table the instance of Table to save the item.
+ * @param {T} item the item to be deleted. The primary key must be presented for deletion.
+ * @param {boolean} ensureDataConsistency ensure data consistency to prevent deleting outdated
+ * data by doing a full-match of properties of the given item against the item in the db. In
+ * this case, each property including meta data will be compared. Otherwise, only the primary
+ * key will be used for deletion.
+     * @returns {Promise<void>} a promise of void
+ */
+ async deleteItemFromDb(
+ table: DBDef.Table,
+ item: T,
+ ensureDataConsistency = true
+ ): Promise {
+ let itemSnapshot: T;
+        // read the item for comparison if ensureDataConsistency is required
+ if (ensureDataConsistency) {
+ // CAUTION: validate the db input (non meta data)
+ table.validateInput(item);
+ // read the item
+ try {
+ itemSnapshot = await this.getItemFromDb(table, [
+ {
+ key: table.primaryKey.name,
+ value: String(item[table.primaryKey.name])
+ }
+ ]);
+ } catch (error) {
+ if (error instanceof DBDef.DbReadError) {
+ throw new DBDef.DbDeleteError(
+ DBDef.DbErrorCode.NotFound,
+ 'Cannot delete item. ' +
+ `Item (id: ${item.id}) not found in table (name: ${table.name}).`
+ );
+ } else {
+ throw error;
+ }
+ }
+            // NOTE: the item snapshot may not exist if already deleted by another
+            // db operation.
+ if (!itemSnapshot) {
+ return;
+ }
+ // full match
+ const keyDiff = Object.keys(itemSnapshot).filter(
+ // ensure that item and snapshot both contain the same keys to compare
+ key =>
+ item[key] !== undefined &&
+ itemSnapshot[key] !== undefined &&
+ itemSnapshot[key] !== item[key]
+ );
+ if (keyDiff.length > 0) {
+ throw new DBDef.DbDeleteError(
+ DBDef.DbErrorCode.InconsistentData,
+ `Inconsistent data. The attributes don't match: ${keyDiff.join()}. ` +
+ ` Item to delete: ${JSON.stringify(item)}.` +
+ ` Item in the db: ${JSON.stringify(itemSnapshot)}.`
+ );
+ }
+ }
+ // CAUTION: validate the db input (only primary key)
+ if (item[table.primaryKey.name] === null) {
+ throw new DBDef.DbDeleteError(
+ DBDef.DbErrorCode.InconsistentData,
+ `Required primary key attribute: ${table.primaryKey.name} not` +
+ ` found in item: ${JSON.stringify(item)}`
+ );
+ }
+ // ASSERT: the id and primary key should have the same value
+ if (item.id !== item[table.primaryKey.name]) {
+ throw new DBDef.DbDeleteError(
+ DBDef.DbErrorCode.InconsistentData,
+ "Item primary key value and id value don't match. Make sure the id" +
+ ' and primary key have the same value.'
+ );
+ }
+ // ASSERT: the given item matches the item in the db. It can be now deleted.
+ const deleteResponse = await this.autoscaleDBRef
+ .container(table.name)
+ .item(String(item[table.primaryKey.name]), String(item[table.primaryKey.name]))
+ .delete();
+ if (
+ deleteResponse.statusCode === HttpStatusCodes.OK ||
+ deleteResponse.statusCode === HttpStatusCodes.NO_CONTENT
+ ) {
+ return;
+ } else if (deleteResponse.statusCode === HttpStatusCodes.NOT_FOUND) {
+ throw new DBDef.DbDeleteError(
+ DBDef.DbErrorCode.NotFound,
+ `Item (${table.primaryKey.name}: ` +
+ `${item.id}) not found in table (${table.name})`
+ );
+ } else {
+ throw new DBDef.DbDeleteError(
+ DBDef.DbErrorCode.UnexpectedResponse,
+ 'Deletion unsuccessful. SDK returned unexpected response with ' +
+ ` httpStatusCode: ${deleteResponse.statusCode}.`
+ );
+ }
+ }
+ private generateCacheId(api: string, parameters: string[]): string {
+        // NOTE: id is constructed as <api>-<parameter1>[-<parameter2>[-<parameter3>...]]
+ return [api, ...parameters.map(String)].join('-');
+ }
+ /**
+ * read a cached response of an API request
+ * @param {ApiCacheRequest} req the api request
+ * @returns {Promise} ApiRequestSave
+ */
+ async apiRequestReadCache(req: ApiCacheRequest): Promise {
+ const table = new AzureApiRequestCache();
+ try {
+ const item = await this.getItemFromDb(table, [
+ {
+ key: table.primaryKey.name,
+ value: this.generateCacheId(req.api, req.parameters)
+ }
+ ]);
+ if (item) {
+ const timeToLive: number = req.ttl || item.ttl;
+ return {
+ id: item.id,
+ stringifiedData: item.res,
+ ttl: item.ttl,
+ cacheTime: item.cacheTime,
+ expired: (item.cacheTime + timeToLive) * 1000 < Date.now()
+ };
+ }
+ } catch (error) {
+ if (error instanceof DBDef.DbReadError) {
+ if (error.code !== DBDef.DbErrorCode.NotFound) {
+ throw error;
+ }
+ }
+ }
+ return null;
+ }
+
+ async apiRequestDeleteCache(req: ApiCacheRequest): Promise {
+ const table = new AzureApiRequestCache();
+ const item = table.downcast({
+ id: this.generateCacheId(req.api, req.parameters),
+ res: null,
+ cacheTime: null,
+ ttl: null
+ });
+ await this.deleteItemFromDb(table, item, false);
+ }
+
+ /**
+ * save a response of an API request to cache
+ * @param {ApiCacheResult} res the api response
+ * @returns {Promise} ApiRequestSave
+ */
+ async apiRequestSaveCache(res: ApiCacheResult): Promise {
+ // if neither of these conditions is met
+ // 1. there is res.id
+ // 2. there are res.api and res.parameters
+ if (!(res.id || (!res.id && res.api && res.parameters))) {
+ throw new Error('Invalid cache result to save. id, api, and paramters are required.');
+ }
+ const table = new AzureApiRequestCache();
+ const item = table.downcast({
+ id: res.id || this.generateCacheId(res.api, res.parameters),
+ res: res.stringifiedData,
+ cacheTime: undefined, // NOTE: cacheTime will use the value of _ts (db generated)
+ ttl: res.ttl * 1000
+ });
+ const savedItem = await this.saveItemToDb(
+ table,
+ item,
+ DBDef.SaveCondition.Upsert,
+ false
+ );
+ if (savedItem) {
+ res.cacheTime = savedItem.cacheTime;
+ }
+ return res;
+ }
+ /**
+     * send an api request with a caching strategy applied.
+ * This can prevent from firing too many arm resource requests to Microsoft Azure that
+ * results in throttling resource manager request.
+ * @see https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
+ * @param {ApiCacheRequest} req an api cache request
+ * @param {ApiCacheOption} cacheOption option for the api caching behavior.
+ * @param {function} dataProcessor a method that process the api request and return
+ * a promise of type D
+ * @returns {Promise} an ApiCache of type D
+ */
+ private async requestWithCaching(
+ req: ApiCacheRequest,
+ cacheOption: ApiCacheOption,
+ dataProcessor: () => Promise
+ ): Promise> {
+ const ttl = 600;
+ let cacheTime: number;
+ let res: ApiCacheResult;
+ let data: D;
+
+ // read cache for those options require reading cache
+ if (cacheOption !== ApiCacheOption.ReadApiOnly) {
+ res = await this.apiRequestReadCache(req);
+ cacheTime = res && res.cacheTime;
+ data = (res && (JSON.parse(res.stringifiedData, jsonParseReviver) as D)) || null;
+ }
+
+ const hitCache = res && res.expired === false;
+
+ // for those options do not require reading data from api
+ if (
+ cacheOption === ApiCacheOption.ReadCacheOnly ||
+ cacheOption === ApiCacheOption.ReadCacheAndDelete
+ ) {
+ // delete the cache if exists
+ if (cacheOption === ApiCacheOption.ReadCacheAndDelete && res) {
+ await this.apiRequestDeleteCache(req);
+ cacheTime = 0;
+ }
+ }
+ // for those options require reading data from api
+ else {
+ if (
+ // read cache first then read api when cache not found
+ (cacheOption === ApiCacheOption.ReadCacheFirst && !res) ||
+ // read data from api only, will not cache the result
+ cacheOption === ApiCacheOption.ReadApiOnly ||
+ // read data from api and then update the cache
+ cacheOption === ApiCacheOption.ReadApiFirst
+ ) {
+ // read data from api
+ data = await dataProcessor();
+ if (data) {
+ // if it requires to save cache, save cache.
+ if (cacheOption !== ApiCacheOption.ReadApiOnly) {
+ if (!res) {
+ res = {
+ stringifiedData: '',
+ ttl: 0
+ };
+ }
+ res.api = req.api;
+ res.parameters = req.parameters;
+ res.stringifiedData = JSON.stringify(data, jsonStringifyReplacer);
+ res.ttl = req.ttl;
+ res = await this.apiRequestSaveCache(res);
+ cacheTime = res.cacheTime;
+ }
+ }
+ }
+ }
+ return {
+ result: data,
+ cacheTime: cacheTime,
+ ttl: ttl,
+ hitCache: hitCache
+ };
+ }
+
+ /**
+ * list vm instances in the given scaling group (vmss)
+ * @param {string} scalingGroupName the scaling group containing the vm
+ * @param {ApiCacheOption} cacheOption (optional) option for the api caching behavior.
+ * default to ApiCacheOption.ReadCacheFirst
+ * @returns {Promise} a list of VirtualMachineScaleSetVM objects
+ */
+ async listInstances(
+ scalingGroupName: string,
+ cacheOption: ApiCacheOption = ApiCacheOption.ReadCacheFirst
+ ): Promise> {
+ const req: ApiCacheRequest = {
+ api: 'listInstances',
+ parameters: [scalingGroupName],
+ ttl: TTLS.listInstances // expected time to live
+ };
+
+ const requestProcessor = async (): Promise => {
+ const response = await this.azureCompute.virtualMachineScaleSetVMs.list(
+ process.env[RequiredEnvVars.RESOURCE_GROUP],
+ scalingGroupName
+ );
+ return (response && response._response.parsedBody) || null;
+ };
+ return await this.requestWithCaching(
+ req,
+ cacheOption,
+ requestProcessor
+ );
+ }
+ /**
+ * describe a virtual machine
+ * @param {string} scalingGroupName the scaling group containing the vm
+ * @param {string} id the id (either integer instanceId or string vmId) of the vm
+ * @param {ApiCacheOption} cacheOption (optional) option for the api caching behavior.
+ * default to ApiCacheOption.ReadCacheFirst
+ * @returns {Promise} ApiCache
+ */
+ async describeInstance(
+ scalingGroupName: string,
+ id: string,
+ cacheOption: ApiCacheOption = ApiCacheOption.ReadCacheFirst
+ ): Promise> {
+ let data: VirtualMachineScaleSetVM;
+ let instanceId: string = id;
+ // ASSERT: id is the vmId to be looked up
+ // NOTE: need to find the corresponding vm.instanceId using vm.vmId by listing all
+ // instances in the vmss and find the vm.
+ if (!isFinite(Number(id))) {
+ let listResult = await this.listInstances(scalingGroupName, cacheOption);
+ data = listResult.result.find(v => v.vmId && v.vmId === id);
+ // NOTE: if vm is a new vm, it won't exist in the cache so try to read from api again
+ // then update cache, just once.
+ if (!data) {
+ listResult = await this.listInstances(
+ scalingGroupName,
+ ApiCacheOption.ReadApiFirst
+ );
+ data = listResult.result.find(v => v.vmId && v.vmId === id);
+ }
+ if (data) {
+ instanceId = data.instanceId;
+ } else {
+ // vm not exists.
+ return {
+ result: null,
+ hitCache: listResult.hitCache,
+ cacheTime: listResult.cacheTime,
+ ttl: listResult.ttl
+ };
+ }
+ }
+ const req: ApiCacheRequest = {
+ api: 'describeInstance',
+ parameters: [scalingGroupName, id],
+ ttl: TTLS.describeInstance // expected time to live
+ };
+ const requestProcessor = async (): Promise => {
+ const response = await this.azureCompute.virtualMachineScaleSetVMs.get(
+ process.env[RequiredEnvVars.RESOURCE_GROUP],
+ scalingGroupName,
+ instanceId,
+ {
+ expand: 'instanceView'
+ }
+ );
+ return response;
+ };
+ return await this.requestWithCaching(req, cacheOption, requestProcessor);
+ }
+ /**
+ * Delete an instance from a scaling group (vmss)
+ * @param {string} scalingGroupName the scaling group containing the vm
+ * @param {number} instanceId the integer instanceId of the vm
+ * @returns {Promise} boolean whether the instance existed and deleted or not exist to delete
+ */
+ async deleteInstanceFromVmss(scalingGroupName: string, instanceId: number): Promise {
+ // CAUTION: when delete instance, must handle cache, otherwise, it can result in
+ // cached data inconsistent.
+ // possibly every method that involves caching should be handled to to delete cache
+ // aka: where requestWithCaching() is applied.
+
+ // providing ApiCacheOption.ReadCacheAndDelete will ensure cache is deleted after read
+ await Promise.all([
+ this.listInstances(scalingGroupName, ApiCacheOption.ReadCacheAndDelete),
+ this.describeInstance(
+ scalingGroupName,
+ String(instanceId),
+ ApiCacheOption.ReadCacheAndDelete
+ ),
+ this.listNetworkInterfaces(
+ scalingGroupName,
+ instanceId,
+ ApiCacheOption.ReadCacheAndDelete
+ )
+ ]);
+
+ // ASSERT: all related caches are deleted. can delete the vm now
+ const response = await this.azureCompute.virtualMachineScaleSetVMs.deleteMethod(
+ process.env[RequiredEnvVars.RESOURCE_GROUP],
+ scalingGroupName,
+ String(instanceId)
+ );
+ if (
+ response._response.status === HttpStatusCodes.OK ||
+ response._response.status === HttpStatusCodes.ACCEPTED
+ ) {
+ return true;
+ } else if (response._response.status === HttpStatusCodes.NO_CONTENT) {
+ return false;
+ } else {
+ throw new Error(`Unkown response with status code: ${response._response.status}.`);
+ }
+ }
+ /**
+ * list network interfaces of a vm in the scaling group (vmss)
+ * @param {string} scalingGroupName the scaling group containing the vm
+ * @param {number} id the integer instanceId of the vm
+ * @param {ApiCacheOption} cacheOption (optional) option for the api caching behavior.
+ * default to ApiCacheOption.ReadCacheFirst
+ * @param {number} ttl (optional) cache time to live in seconds. default to 600
+ * @returns {Promise} ApiCache
+ */
+ async listNetworkInterfaces(
+ scalingGroupName: string,
+ id: number,
+ cacheOption: ApiCacheOption = ApiCacheOption.ReadCacheFirst,
+ ttl = TTLS.listNetworkInterfaces
+ ): Promise> {
+ const req: ApiCacheRequest = {
+ api: 'listNetworkInterfaces',
+ parameters: [scalingGroupName, String(id)],
+ ttl: ttl // expected time to live
+ };
+ const requestProcessor = async (): Promise => {
+ const response =
+ await this.azureNetwork.networkInterfaces.listVirtualMachineScaleSetVMNetworkInterfaces(
+ process.env[RequiredEnvVars.RESOURCE_GROUP],
+ scalingGroupName,
+ String(id)
+ );
+ return (response && response._response.parsedBody) || null;
+ };
+ return await this.requestWithCaching(
+ req,
+ cacheOption,
+ requestProcessor
+ );
+ }
+
+ private streamToBuffer(readableStream: NodeJS.ReadableStream): Promise {
+ return new Promise((resolve, reject) => {
+ const chunks = [];
+ readableStream.on('data', data => {
+ chunks.push(data instanceof Buffer ? data : Buffer.from(data));
+ });
+ readableStream.on('end', () => {
+ resolve(Buffer.concat(chunks));
+ });
+ readableStream.on('error', reject);
+ });
+ }
+ /**
+ * read the content of a blob into a string
+ * @param {string} container the blob container containing the target blob file
+ * @param {string} blobFilePath the full path to the blob file in the container, including
+ * blob file name
+ * @returns {Promise} string
+ */
+ async getBlobContent(container: string, blobFilePath: string): Promise {
+ const containerClient = this.azureStorage.getContainerClient(container);
+ if (!containerClient.exists()) {
+ throw new Error(`blob container (name: ${container}) not exists.`);
+ }
+ const blobClient = containerClient.getBlobClient(blobFilePath);
+ if (!blobClient.exists()) {
+ throw new Error(`blob container (name: ${container}) not exists.`);
+ }
+ // download the blob from position 0 (beginning)
+ const response = await blobClient.download();
+ const buffer = await this.streamToBuffer(response.readableStreamBody);
+ return buffer.toString();
+ }
+ /**
+ * List all blob objects in a given container
+ * @param {string} container the blob container containing the target blob file
+ * @param {string} subdirectory the subdirectory of the container to list
+ * @returns {Promise} an array of blob objects in the given location
+ */
+ async listBlob(container: string, subdirectory?: string): Promise {
+ const prefix = subdirectory || '';
+
+ // DEBUG: for local debugging use, the next lines get files from local file system instead
+ // it is usually useful when doing a mock test that do not require real api calls
+ if (process.env.LOCAL_DEV_MODE === 'true') {
+ return fs
+ .readdirSync(path.resolve(container, prefix))
+ .filter(fileName => {
+ const stat = fs.statSync(path.resolve(container, prefix, fileName));
+ return !stat.isDirectory();
+ })
+ .map(fileName => {
+ return {
+ fileName: fileName,
+ content: ''
+ } as Blob;
+ });
+ } else {
+ const containerClient = this.azureStorage.getContainerClient(container);
+ if (!containerClient.exists()) {
+ throw new Error(`blob container (name: ${container}) not exists.`);
+ }
+ const iterator = containerClient.listBlobsFlat();
+ const blobs: Blob[] = [];
+ let result = await iterator.next();
+ while (!result.done) {
+ blobs.push({
+ fileName: path.basename(result.value.name),
+ filePath: path.dirname(result.value.name)
+ });
+ result = await iterator.next();
+ }
+ return blobs.filter(blob => blob.filePath === subdirectory);
+ }
+ }
+
+ /**
+ * invoke another Azure function
+ * @param {string} functionEndpoint the full function URL, format:
+ * https://.azurewebsites.net/api/
+ * @param {string} payload a JSON stringified JSON object that can be parsed back to a JSON
+ * object without error.
+ * @param {string} accessKey? (optional) function authentication keys
+ * @returns {Promise} a JSON stringified response of the invoked function
+ * @see https://docs.microsoft.com/en-us/azure/azure-functions/functions-bindings-http-webhook-trigger?tabs=csharp#authorization-keys
+ * @see https://docs.microsoft.com/en-us/azure/azure-functions/durable/durable-functions-overview
+ */
+ async invokeAzureFunction(
+ functionEndpoint: string,
+ payload: string,
+ accessKey?: string
+ ): Promise> {
+ // NOTE: make requests to another Azure function using http requests and library axios
+ const reqOptions: AxiosRequestConfig = {
+ method: 'POST',
+ headers: {
+ 'x-functions-key': accessKey
+ },
+ url: functionEndpoint,
+ data: JSON.parse(payload),
+ // NOTE: see the hard timeout
+ // https://docs.microsoft.com/en-us/azure/azure-functions/functions-scale#timeout
+ timeout: 230000 // ms
+ };
+ return await axios(reqOptions);
+ }
+
+ async keyVaultGetSecret(key: string): Promise {
+ const secret = await this.azureKeyVault.getSecret(key);
+ return secret.value;
+ }
+}
diff --git a/core/azure/azure-platform-adapter.ts b/core/azure/azure-platform-adapter.ts
new file mode 100644
index 0000000..3616d51
--- /dev/null
+++ b/core/azure/azure-platform-adapter.ts
@@ -0,0 +1,1800 @@
+import * as AzureComputeModels from '@azure/arm-compute/esm/models';
+import * as AzureNetworkModels from '@azure/arm-network/esm/models';
+import path from 'path';
+import {
+ ApiCache,
+ ApiCacheOption,
+ AzureAutoscale,
+ AzureAutoscaleDbItem,
+ AzureCustomLog,
+ AzureFortiAnalyzer,
+ AzureFortiGateAutoscaleSetting,
+ AzureFortiGateAutoscaleSettingItemDictionary,
+ AzureFunctionDef,
+ AzureFunctionHttpTriggerProxy,
+ AzureFunctionInvocationProxy,
+ AzureFunctionServiceProviderProxy,
+ AzureLicenseStock,
+ AzureLicenseStockDbItem,
+ AzureLicenseUsage,
+ AzureLicenseUsageDbItem,
+ AzurePlatformAdaptee,
+ AzurePrimaryElection,
+ AzurePrimaryElectionDbItem,
+ AzureSettings,
+ CosmosDBQueryWhereClause,
+ LogItem
+} from '.';
+import {
+ Blob,
+ CloudFunctionInvocationPayload,
+ constructInvocationPayload,
+ DeviceSyncInfo,
+ genChecksum,
+ HealthCheckRecord,
+ HealthCheckSyncState,
+ JSONable,
+ LicenseFile,
+ LicenseStockRecord,
+ LicenseUsageRecord,
+ NetworkInterface,
+ NicAttachmentRecord,
+ PlatformAdapter,
+ PrimaryRecord,
+ PrimaryRecordVoteState,
+ ReqMethod,
+ ReqType,
+ ResourceFilter,
+ SettingItemDefinition,
+ Settings,
+ VirtualMachine,
+ VirtualMachineState
+} from '..';
+import { FortiGateAutoscaleServiceRequestSource } from '../fortigate-autoscale';
+import * as DBDef from '../db-definitions';
+
+export type ConsistenyCheckType = { [key in keyof T]?: string | number | boolean | null };
+export class AzurePlatformAdapter implements PlatformAdapter {
+ adaptee: AzurePlatformAdaptee;
+ proxy: AzureFunctionInvocationProxy;
+ createTime: number;
+ settings: Settings;
+ constructor(p: AzurePlatformAdaptee, proxy: AzureFunctionInvocationProxy, createTime?: number) {
+ this.adaptee = p;
+ this.proxy = proxy;
+ this.createTime = (!isNaN(createTime) && createTime) || Date.now();
+ }
+ /**
+ * initiate the class object
+ * @returns {Promise} void
+ */
+ async init(): Promise {
+ // CAUTION: adaptee.init() is required.
+ await this.adaptee.init();
+ this.settings = await this.adaptee.loadSettings();
+ // has settings been migrated from Function environment variables to db yet?
+ const settingsSaved = this.settings.get(
+ AzureFortiGateAutoscaleSetting.AzureFortiGateAutoscaleSettingSaved
+ );
+ // do the settings migration if not yet saved.
+ if (!(settingsSaved && settingsSaved.truthValue)) {
+ await this.saveSettings();
+ // reload the settings
+ this.settings = await this.adaptee.loadSettings();
+ }
+ await this.validateSettings();
+ }
+
+ /**
+ * save settings from node environment variables to db
+ * @returns {Promise} void
+ */
+ async saveSettings(): Promise {
+ // NOTE: this mapping matches each required setting item key with an existing
+ // node environment variable.
+ // key: settingKey, which is a defined setting key.
+ // value: envKey, which is the env var name used in the Function App node environment.
+
+ // invalidate the cache first
+ await this.adaptee.reloadSettings(true);
+ const settingItemMapping: Map = new Map();
+ Object.entries(AzureFortiGateAutoscaleSetting).forEach(([k, v]) => {
+ settingItemMapping.set(k, v);
+ });
+ await Promise.all(
+ Array.from(settingItemMapping.entries()).map(([, envKey]) => {
+ const settingItem: SettingItemDefinition =
+ AzureFortiGateAutoscaleSettingItemDictionary[envKey];
+ // if the setting key not exists in either the setting dictionary or the process.env
+ // return null to be able to filter it out
+ if (!settingItem) {
+ return null;
+ }
+ return this.saveSettingItem(
+ settingItem.keyName,
+ (process.env[envKey] === undefined && 'n/a') || process.env[envKey],
+ settingItem.description,
+ settingItem.jsonEncoded,
+ settingItem.editable
+ );
+ })
+ );
+ // ASSERT: each saveSettingItem completed.
+ // save the flag to the db.
+ const flagItem: SettingItemDefinition =
+ AzureFortiGateAutoscaleSettingItemDictionary[
+ AzureFortiGateAutoscaleSetting.AzureFortiGateAutoscaleSettingSaved
+ ];
+ await this.saveSettingItem(
+ flagItem.keyName,
+ 'true',
+ flagItem.description,
+ flagItem.jsonEncoded,
+ flagItem.editable
+ );
+ }
+ /**
+ * Save a setting to db
+ * @param {string} key the setting key
+ * @param {string} value the setting value
+ * @param {string} description? the setting description
+ * @param {boolean} jsonEncoded? is this setting in json encoded format or not
+ * @param {boolean} editable? is this setting editable
+ * @returns {Promise} the key name of the saved setting
+ */
+ async saveSettingItem(
+ key: string,
+ value: string,
+ description?: string,
+ jsonEncoded?: boolean,
+ editable?: boolean
+ ): Promise {
+ const table = new AzureSettings();
+ const item = table.downcast({
+ settingKey: key,
+ settingValue: value,
+ description: description,
+ jsonEncoded: jsonEncoded,
+ editable: editable
+ });
+ const savedItem = await this.adaptee.saveItemToDb(
+ table,
+ item,
+ DBDef.SaveCondition.Upsert
+ );
+ return savedItem.settingKey;
+ }
+ /**
+ * Get the request type defined in enum ReqType
+ * @returns {Promise} the enum value of ReqType
+ */
+ async getRequestType(): Promise {
+ const reqMethod = await this.proxy.getReqMethod();
+ const headers = await this.proxy.getReqHeaders();
+ const body = await this.proxy.getReqBody();
+ const functionName = this.proxy.context.executionContext.functionName;
+ if (this.proxy instanceof AzureFunctionHttpTriggerProxy) {
+ if (functionName === AzureFunctionDef.ByolLicense.name) {
+ if (reqMethod === ReqMethod.GET) {
+ if (headers['fos-instance-id'] === null) {
+ throw new Error(
+ 'Invalid request. fos-instance-id is missing in [GET] request header.'
+ );
+ } else {
+ return Promise.resolve(ReqType.ByolLicense);
+ }
+ } else {
+ throw new Error(`Invalid request. Method [${reqMethod}] not allowd`);
+ }
+ } else if (functionName === AzureFunctionDef.FortiGateAutoscaleHandler.name) {
+ if (reqMethod === ReqMethod.GET) {
+ if (headers['fos-instance-id'] === null) {
+ throw new Error(
+ 'Invalid request. fos-instance-id is missing in [GET] request header.'
+ );
+ } else {
+ return Promise.resolve(ReqType.BootstrapConfig);
+ }
+ } else if (reqMethod === ReqMethod.POST) {
+ if ((body as JSONable).status) {
+ return Promise.resolve(ReqType.StatusMessage);
+ } else if (body.instance) {
+ return Promise.resolve(ReqType.HeartbeatSync);
+ } else {
+ throw new Error(
+ `Invalid request body: [instance: ${body.instance}],` +
+ ` [status: ${body.status}]`
+ );
+ }
+ } else {
+ throw new Error(`Unsupported request method: ${reqMethod}`);
+ }
+ } else if (functionName === AzureFunctionDef.FazAuthHandler.name) {
+ return Promise.resolve(ReqType.CloudFunctionPeerInvocation);
+ } else if (functionName === AzureFunctionDef.CustomLog.name) {
+ return Promise.resolve(ReqType.CustomLog);
+ } else {
+ throw new Error('Unknown request type.');
+ }
+ }
+ // NOTE: if request is a service provider request
+ else if (this.proxy instanceof AzureFunctionServiceProviderProxy) {
+ switch (body.source) {
+ case FortiGateAutoscaleServiceRequestSource.FortiGateAutoscale:
+ return Promise.resolve(ReqType.ServiceProviderRequest);
+ default:
+ throw new Error(
+ `Unsupported CloudFunctionProxy. Request: ${JSON.stringify(
+ this.proxy.request
+ )}`
+ );
+ }
+ } else {
+ throw new Error(`Unsupported CloudFunctionProxy: ${JSON.stringify(this.proxy)}`);
+ }
+ }
+ async getReqDeviceSyncInfo(): Promise {
+ const reqType: ReqType = await this.getRequestType();
+ if (reqType === ReqType.HeartbeatSync || reqType === ReqType.StatusMessage) {
+ const body = await this.proxy.getReqBody();
+ const deviceSyncInfo: DeviceSyncInfo = {
+ // always available
+ instance: (body.instance && String(body.instance)) || null,
+ interval: (body.interval && Number(body.interval)) || NaN,
+ // partially available in some request types
+ status: (body.status && String(body.status)) || undefined,
+ // NOTE: partially available in some device versions
+ sequence: (body.sequence && Number(body.sequence)) || NaN,
+ time: (body.time && String(body.time)) || null,
+ syncTime: (body.sync_time && String(body.sync_time)) || null,
+ syncFailTime: (body.sync_fail_time && String(body.sync_fail_time)) || null,
+ syncStatus: (body.sync_status !== null && Boolean(body.sync_status)) || null,
+ isPrimary: (body.is_primary !== null && Boolean(body.is_primary)) || null,
+ checksum: (body.checksum !== null && String(body.checksum)) || null
+ };
+ return deviceSyncInfo;
+ } else {
+ return null;
+ }
+ }
+ /**
+ * Get the heartbeat interval passing by the request called by a FortiGate
+ * @returns {Promise} heartbeat interval
+ */
+ async getReqHeartbeatInterval(): Promise {
+ const deviceSyncInfo = await this.getReqDeviceSyncInfo();
+ return (deviceSyncInfo && deviceSyncInfo.interval) || NaN;
+ }
+ /**
+ * Get the vm id passing by the request called by a FortiGate.
+ * The vm id is the 'vmId' property of a virtual machine.
+ * @returns {Promise} vmId
+ */
+ async getReqVmId(): Promise {
+ const reqMethod = await this.proxy.getReqMethod();
+ if (reqMethod === ReqMethod.GET) {
+ const headers = await this.proxy.getReqHeaders();
+ return Promise.resolve(headers['fos-instance-id'] as string);
+ } else if (reqMethod === ReqMethod.POST) {
+ const body = await this.proxy.getReqBody();
+ return Promise.resolve(body.instance as string);
+ } else {
+ throw new Error(`Cannot get vm id in unsupported request method: ${reqMethod}`);
+ }
+ }
+ /**
+ * Return the JSON stringified request.
+ * @returns {Promise} request as a string
+ */
+ getReqAsString(): Promise {
+ return Promise.resolve(JSON.stringify(this.proxy.request));
+ }
+ /**
+ * Get the full list of Autoscale Setting items from db
+ * @returns {Promise} Settings (a map)
+ */
+ getSettings(): Promise {
+ return Promise.resolve(this.settings);
+ }
+ /**
+ * Validate the loaded settings to ensure setting item integrity.
+ * @returns {Promise} validation passed is true or false
+ */
+ validateSettings(): Promise {
+ const required = [
+ AzureFortiGateAutoscaleSetting.AutoscaleHandlerUrl,
+ AzureFortiGateAutoscaleSetting.FortiGatePskSecret,
+ AzureFortiGateAutoscaleSetting.FortiGateSyncInterface,
+ AzureFortiGateAutoscaleSetting.FortiGateTrafficPort,
+ AzureFortiGateAutoscaleSetting.FortiGateAdminPort,
+ AzureFortiGateAutoscaleSetting.HeartbeatInterval,
+ AzureFortiGateAutoscaleSetting.ByolScalingGroupName,
+ AzureFortiGateAutoscaleSetting.PaygScalingGroupName
+ ];
+ const missingKeys = required.filter(key => !this.settings.has(key)).join(', ');
+ if (missingKeys) {
+ throw new Error(`The following required setting item not found: ${missingKeys}`);
+ }
+ return Promise.resolve(true);
+ }
+ /**
+ * map an Azure vm object into the Autoscale VirtualMachine class object.
+ * @param {AzureComputeModels.VirtualMachineScaleSetVM} instance vm instance to map
+ * @param {string} scalingGroupName the scaling group containing the vm instance
+ * @param {AzureNetworkModels.NetworkInterface[]} nics network interfaces associated with this
+ * vm instance.
+ * @returns {VirtualMachine} an Autoscale VirtualMachine class object
+ */
+ protected mapVm(
+ instance: AzureComputeModels.VirtualMachineScaleSetVM,
+ scalingGroupName: string,
+ nics: AzureNetworkModels.NetworkInterface[]
+ ): VirtualMachine {
+ let state: VirtualMachineState;
+ let provisioningState: string;
+ let powerState: string;
+
+ this.proxy.logAsInfo('instance in map vm', instance);
+ this.proxy.logAsInfo('Scaling group name', scalingGroupName);
+ this.proxy.logAsInfo('nics in map vm', nics);
+
+ if (instance.instanceView && instance.instanceView.statuses) {
+ instance.instanceView.statuses.forEach(s => {
+ if (s.code.includes('ProvisioningState')) {
+ provisioningState = s.code.split('/')[1];
+ } else if (s.code.includes('PowerState')) {
+ powerState = s.code.split('/')[1];
+ }
+ });
+ }
+
+ this.proxy.logAsInfo('powerState', powerState);
+ // NOTE: see: https://docs.microsoft.com/en-us/azure/virtual-machines/states-lifecycle
+ // there's no terminated state for a vm in Azure because terminated vm will not be visible.
+ if (powerState === 'running') {
+ state = VirtualMachineState.Running;
+ } else if (powerState === 'stopped') {
+ state = VirtualMachineState.Stopped;
+ } else if (powerState === 'deallocated') {
+ state = VirtualMachineState.Deallocated;
+ } else if (powerState === 'starting') {
+ state = VirtualMachineState.Starting;
+ } else if (powerState === 'stopping') {
+ state = VirtualMachineState.Stopping;
+ } else if (provisioningState === 'updating') {
+ state = VirtualMachineState.Updating;
+ } else if (provisioningState === 'creating') {
+ state = VirtualMachineState.Creating;
+ } else if (provisioningState === 'deleting') {
+ state = VirtualMachineState.Terminating;
+ } else {
+ state = VirtualMachineState.Unknown;
+ }
+
+ this.proxy.logAsInfo('state', state);
+
+ // network interface
+ const networkInterfaces = nics.map((nic, index) => {
+ return this.mapNic(nic, index);
+ });
+ const primaryNic = networkInterfaces.length > 0 && networkInterfaces[0];
+ const vm: VirtualMachine = {
+ id: instance.vmId,
+ scalingGroupName: scalingGroupName,
+ primaryPrivateIpAddress: primaryNic && primaryNic.privateIpAddress,
+ // TODO: vm in virtual machine scale set is associated with a load balancer and use
+ // port forwarding to route incoming traffic. So implementation to retrieve the public
+ // ip address of the load balancer would be needed when there's a need to use that
+ // public ip address in a feature. Since retrieving information about the load balancer
+ // also counts toward the arm request limit (see: https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling)
+ // but there's no feature requires the public ip so far, we don't retrieve the public
+ // ip address unless further requirements.
+ primaryPublicIpAddress: null,
+ virtualNetworkId: primaryNic && primaryNic.virtualNetworkId,
+ subnetId: primaryNic && primaryNic.subnetId,
+ securityGroups: [],
+ networkInterfaces: networkInterfaces,
+ networkInterfaceIds: networkInterfaces.map(nic => nic.id),
+ sourceData: {},
+ state: state
+ };
+ Object.assign(vm.sourceData, instance);
+ return vm;
+ }
+ /**
+ * map an Azure network interface object to an Autoscale NetworkInterface class object
+ * @param {AzureNetworkModels.NetworkInterface} eni the Azure network interface object to map
+ * @param {number} index the index of the logical position of the eni in the device
+ * @returns {NetworkInterface} the Autoscale NetworkInterface class object
+ */
+ protected mapNic(eni: AzureNetworkModels.NetworkInterface, index: number): NetworkInterface {
+ this.proxy.logAsInfo('eni', eni);
+ const [primaryIpConfiguration] =
+ (eni && eni.ipConfigurations.filter(ipConfig => ipConfig.primary)) || [];
+ this.proxy.logAsInfo('primaryIpConfiguration', primaryIpConfiguration);
+ const matchVNet = primaryIpConfiguration.subnet.id.match(
+ new RegExp('(?<=virtualNetworks/).*(?=/subnets)')
+ );
+ this.proxy.logAsInfo('matchVNet', matchVNet);
+
+ const matchSubnet = primaryIpConfiguration.subnet.id.match(new RegExp('(?<=subnets/).*'));
+ this.proxy.logAsInfo('matchSubnet', matchSubnet);
+
+ const nic: NetworkInterface = {
+ id: eni.id,
+ privateIpAddress: primaryIpConfiguration && primaryIpConfiguration.privateIPAddress,
+ index: index,
+ subnetId: Array.isArray(matchSubnet) && matchSubnet[0],
+ virtualNetworkId: Array.isArray(matchVNet) && matchVNet[0],
+ attachmentId: undefined, // NOTE: no attachment defined for nic in the Azure platform
+ description: undefined // NOTE: no description defined for nic in the Azure platform
+ };
+
+ this.proxy.logAsInfo('nic', nic);
+
+ return nic;
+ }
+
+ // describeScalingGroup(scalingGroupName: string):
+ /**
+ * Get the virtual machine (representing a FortiGate) that made the request to the Autoscale
+ * function.
+ * @returns {Promise} the requesting vm
+ */
+ async getTargetVm(): Promise {
+ this.proxy.logAsInfo('calling getTargetVm');
+ const byolGroupName = this.settings.get(
+ AzureFortiGateAutoscaleSetting.ByolScalingGroupName
+ ).value;
+ const paygGroupName = this.settings.get(
+ AzureFortiGateAutoscaleSetting.PaygScalingGroupName
+ ).value;
+
+ this.proxy.logAsInfo('byolGroupName', byolGroupName);
+ this.proxy.logAsInfo('paygGroupName', paygGroupName);
+
+ // try to find vm in the byol scaling group
+ let describeInstanceResult: ApiCache;
+ let instance: AzureComputeModels.VirtualMachineScaleSetVM;
+ let scalingGroupName: string;
+ const vmId: string = await this.getReqVmId();
+ this.proxy.logAsInfo('vmId', vmId);
+
+ try {
+ scalingGroupName = byolGroupName;
+ describeInstanceResult = await this.adaptee.describeInstance(scalingGroupName, vmId);
+ this.proxy.logAsInfo('describeInstanceResult', describeInstanceResult);
+
+ // try to find vm in the payg scaling group if not found in byol group
+ if (!describeInstanceResult.result) {
+ scalingGroupName = paygGroupName;
+ describeInstanceResult = await this.adaptee.describeInstance(
+ scalingGroupName,
+ vmId
+ );
+ }
+ // ASSERT: the vm exists in either the byol or the payg scaling group.
+ instance = describeInstanceResult.result;
+ this.proxy.logAsInfo('instance', instance);
+ if (!instance) {
+ throw new Error(`vm (vmId: ${vmId}) not found in any scaling group.`);
+ }
+ } catch (error) {
+ this.proxy.logForError('cannot get target vm', error);
+ throw error;
+ }
+ // ASSERT: the vm is found.
+ // get network interfaces
+ const listNetworkInterfacesResult = await this.adaptee.listNetworkInterfaces(
+ scalingGroupName,
+ Number(instance.instanceId),
+ ApiCacheOption.ReadCacheFirst,
+ describeInstanceResult.ttl
+ );
+
+ this.proxy.logAsInfo('scalingGroupName', scalingGroupName);
+ this.proxy.logAsInfo('listNetworkInterfacesResult', listNetworkInterfacesResult);
+
+ const nics = listNetworkInterfacesResult.result.filter(nic => nic);
+ const vm: VirtualMachine = this.mapVm(instance, scalingGroupName, nics);
+ this.proxy.logAsInfo('vm', vm);
+ this.proxy.logAsInfo('called getTargetVm');
+ return vm;
+ }
+ /**
+ * Get the primary virtual machine (representing a FortiGate) that was elected in the
+ * Autoscale cluster
+ * @returns {Promise} the primary vm in the Autoscale cluster
+ */
+ async getPrimaryVm(): Promise {
+ this.proxy.logAsInfo('calling getPrimaryVm');
+ const primaryRecord = await this.getPrimaryRecord();
+ if (!primaryRecord) {
+ return null;
+ }
+ const describeInstanceResult = await this.adaptee.describeInstance(
+ primaryRecord.scalingGroupName,
+ primaryRecord.vmId
+ );
+ let vm: VirtualMachine;
+ if (describeInstanceResult.result) {
+ // get network interfaces
+ const listNetworkInterfacesResult = await this.adaptee.listNetworkInterfaces(
+ primaryRecord.scalingGroupName,
+ Number(describeInstanceResult.result.instanceId),
+ ApiCacheOption.ReadCacheFirst,
+ describeInstanceResult.ttl
+ );
+ const nics = listNetworkInterfacesResult.result;
+ vm = this.mapVm(describeInstanceResult.result, primaryRecord.scalingGroupName, nics);
+ }
+ this.proxy.logAsInfo('called getPrimaryVm');
+ return vm;
+ }
+
+ async getVmById(vmId: string, scalingGroupName: string): Promise {
+ this.proxy.logAsInfo('calling getVmById');
+ if (!scalingGroupName) {
+ this.proxy.logAsInfo('called getVmById');
+ return null;
+ }
+ const describeInstanceResult = await this.adaptee.describeInstance(scalingGroupName, vmId);
+ let vm: VirtualMachine;
+ if (describeInstanceResult.result) {
+ // get network interfaces
+ const listNetworkInterfacesResult = await this.adaptee.listNetworkInterfaces(
+ scalingGroupName,
+ Number(describeInstanceResult.result.instanceId),
+ ApiCacheOption.ReadCacheFirst,
+ describeInstanceResult.ttl
+ );
+ const nics = listNetworkInterfacesResult.result;
+ vm = this.mapVm(describeInstanceResult.result, scalingGroupName, nics);
+ }
+ this.proxy.logAsInfo('called getVmById');
+ return vm;
+ }
+ /**
+ * List all vm instances of a certain scaling group
+ * @param {string} scalingGroupName the scaling group name to list
+ * @param {boolean} withNics whether retrieve the nic of each vm or not
+ * @returns {Promise} an array of {scalingGroupName, vm instance, its nics(if requested)}
+ */
+ private async listScalingGroupInstances(
+ scalingGroupName: string,
+ withNics = false
+ ): Promise<
+ {
+ scalingGroupName: string;
+ instance: AzureComputeModels.VirtualMachineScaleSetVM;
+ nics: AzureNetworkModels.NetworkInterface[];
+ }[]
+ > {
+ const listInstancesResults = await this.adaptee.listInstances(scalingGroupName);
+ return await Promise.all(
+ listInstancesResults.result.map(async instance => {
+ const res: {
+ scalingGroupName: string;
+ instance: AzureComputeModels.VirtualMachineScaleSetVM;
+ nics: AzureNetworkModels.NetworkInterface[];
+ } = {
+ scalingGroupName: scalingGroupName,
+ instance: instance,
+ nics: []
+ };
+ if (withNics) {
+ const listNetworkInterfacesResult = await this.adaptee.listNetworkInterfaces(
+ scalingGroupName,
+ Number(instance.instanceId)
+ );
+ res.nics = listNetworkInterfacesResult.result || [];
+ }
+ return res;
+ })
+ );
+ }
+ /**
+ * List all vm instances in each scaling group of the Autoscale cluster
+ * @param {boolean} identifyScalingGroup (unused parameter)
+ * @param {boolean} listNic whether retrieve the nic of each vm or not
+ * @returns {Promise} a list of all vm instances in the Autoscale cluster
+ */
+ async listAutoscaleVm(
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ identifyScalingGroup?: boolean, // this variable required by the implementation but isn't used
+ listNic?: boolean
+ ): Promise {
+ this.proxy.logAsInfo('calling listAutoscaleVm');
+ // NOTE: need to list vm in both byol and payg groups
+ const byolGroupName = this.settings.get(
+ AzureFortiGateAutoscaleSetting.ByolScalingGroupName
+ ).value;
+ const paygGroupName = this.settings.get(
+ AzureFortiGateAutoscaleSetting.PaygScalingGroupName
+ ).value;
+ const instances = [
+ ...(await this.listScalingGroupInstances(byolGroupName, listNic)),
+ ...(await this.listScalingGroupInstances(paygGroupName, listNic))
+ ];
+
+ this.proxy.logAsInfo('byolGroupName', byolGroupName);
+ this.proxy.logAsInfo('paygGroupName', paygGroupName);
+ this.proxy.logAsInfo('instances in listAutoscaleVm', instances);
+
+ const vms = instances.map(item =>
+ this.mapVm(item.instance, item.scalingGroupName, item.nics)
+ );
+
+ this.proxy.logAsInfo('vms', vms);
+ this.proxy.logAsInfo('called listAutoscaleVm');
+ return vms;
+ }
+
+    /**
+     * Get the Autoscale health check record of a vm with the given vmId.
+     * @param {string} vmId the vmId property of the vm.
+     * @returns {Promise<HealthCheckRecord>} the health check record, or undefined if no
+     * record exists for the given vmId
+     */
+    async getHealthCheckRecord(vmId: string): Promise<HealthCheckRecord> {
+        this.proxy.logAsInfo('calling getHealthCheckRecord');
+        const settings = await this.getSettings();
+        const table = new AzureAutoscale();
+        // the vmId is the primary key of the autoscale health check table
+        const dbItem = await this.adaptee.getItemFromDb(table, [
+            {
+                key: table.primaryKey.name,
+                value: vmId
+            }
+        ]);
+
+        let record: HealthCheckRecord;
+
+        if (dbItem) {
+            record = this.parseHealthCheckRecord(dbItem, settings);
+        }
+        this.proxy.logAsInfo('called getHealthCheckRecord');
+        return record;
+    }
+
+    /**
+     * Convert a raw Autoscale db item into a HealthCheckRecord, deriving the health
+     * state from the heartbeat timing settings.
+     * @param {DBDef.AutoscaleDbItem} dbItem the raw db item to parse
+     * @param {Settings} settings Autoscale settings (heartbeat loss count / delay allowance)
+     * @returns {HealthCheckRecord} the parsed health check record
+     */
+    private parseHealthCheckRecord(
+        dbItem: DBDef.AutoscaleDbItem,
+        settings: Settings
+    ): HealthCheckRecord {
+        const maxHeartbeatLossCountSettingItem = settings.get(
+            AzureFortiGateAutoscaleSetting.HeartbeatLossCount
+        );
+
+        const heartbeatDelayAllowanceSettingItem = settings.get(
+            AzureFortiGateAutoscaleSetting.HeartbeatDelayAllowance
+        );
+
+        // default to 0 when the setting item is missing or not a number
+        const maxHeartbeatLossCount: number =
+            (maxHeartbeatLossCountSettingItem && Number(maxHeartbeatLossCountSettingItem.value)) ||
+            0;
+
+        // the setting is stored in seconds; convert to milliseconds (0 when missing)
+        const heartbeatDelayAllowance: number =
+            (heartbeatDelayAllowanceSettingItem &&
+                Number(heartbeatDelayAllowanceSettingItem.value)) * 1000 || 0;
+
+        // if heartbeatDelay is <= 0, it means hb arrives early or ontime
+        const heartbeatDelay = this.createTime - dbItem.nextHeartBeatTime - heartbeatDelayAllowance;
+
+        // map the stored string back to the HealthCheckSyncState enum member
+        const [syncState] = Object.entries(HealthCheckSyncState)
+            .filter(([, value]) => {
+                return dbItem.syncState === value;
+            })
+            .map(([, v]) => v);
+
+        // a late heartbeat counts as one additional loss
+        const nextHeartbeatLossCount = dbItem.heartBeatLossCount + ((heartbeatDelay > 0 && 1) || 0);
+
+        const remainingLossAllowed = Math.max(maxHeartbeatLossCount - nextHeartbeatLossCount, 0);
+        // healthy reason: next heartbeat loss count is smaller than max allowed value.
+        const isHealthy = remainingLossAllowed > 0;
+
+        // dumb period: number of whole heartbeat intervals elapsed since the expected
+        // (next) heartbeat time; 0 when the interval is not positive
+        const irresponsiveTimeFromNextHeartbeat = Math.max(
+            this.createTime - dbItem.nextHeartBeatTime,
+            0
+        );
+        const irresponsivePeriod =
+            dbItem.heartBeatInterval > 0
+                ? Math.floor(irresponsiveTimeFromNextHeartbeat / dbItem.heartBeatInterval)
+                : 0;
+
+        return {
+            vmId: dbItem.vmId,
+            scalingGroupName: dbItem.scalingGroupName,
+            ip: dbItem.ip,
+            primaryIp: dbItem.primaryIp,
+            heartbeatInterval: dbItem.heartBeatInterval,
+            heartbeatLossCount: dbItem.heartBeatLossCount,
+            nextHeartbeatTime: dbItem.nextHeartBeatTime,
+            syncState: syncState,
+            // if the prop doesn't exist in item set it to 0 by default
+            syncRecoveryCount: dbItem.syncRecoveryCount || 0,
+            seq: dbItem.seq,
+            healthy: isHealthy,
+            upToDate: true,
+            // the following properties are only available in some device versions
+            // convert string 'null' to null
+            sendTime: dbItem.sendTime === 'null' ? null : dbItem.sendTime,
+            deviceSyncTime: dbItem.deviceSyncTime === 'null' ? null : dbItem.deviceSyncTime,
+            deviceSyncFailTime:
+                dbItem.deviceSyncFailTime === 'null' ? null : dbItem.deviceSyncFailTime,
+            // tri-state stored as string: 'true' | 'false' | anything else -> null
+            deviceSyncStatus: ['true', 'false'].includes(dbItem.deviceSyncStatus)
+                ? dbItem.deviceSyncStatus === 'true'
+                : null,
+            deviceIsPrimary: ['true', 'false'].includes(dbItem.deviceIsPrimary)
+                ? dbItem.deviceIsPrimary === 'true'
+                : null,
+            deviceChecksum: dbItem.deviceChecksum === 'null' ? null : dbItem.deviceChecksum,
+            irresponsivePeriod: irresponsivePeriod,
+            remainingLossAllowed: remainingLossAllowed
+        };
+    }
+
+ async listHealthCheckRecord(): Promise {
+ this.proxy.logAsInfo('calling listHealthCheckRecord');
+ const settings = await this.getSettings();
+ const table = new AzureAutoscale();
+ const queryResult = await this.adaptee.listItemFromDb(table);
+ const dbItems = queryResult.result || [];
+ const records: HealthCheckRecord[] = dbItems.map(dbItem => {
+ return this.parseHealthCheckRecord(dbItem, settings);
+ });
+
+ this.proxy.logAsInfo('called listHealthCheckRecord');
+ return records;
+ }
+
+ /**
+ * Get the Autoscale health check record of the elected primary vm
+ * @param {DBDef.KeyValue[]} filters optional filter to match the record or null if not match
+ * @returns {Promise} the health check record
+ */
+ async getPrimaryRecord(filters?: DBDef.KeyValue[]): Promise {
+ this.proxy.logAsInfo('calling getPrimaryRecord');
+ const table = new AzurePrimaryElection();
+ const listClause: CosmosDBQueryWhereClause[] =
+ filters &&
+ filters.map(f => {
+ return { name: f.key, value: f.value };
+ });
+ // ASSERT: there's only 1 matching primary record or no matching record.
+ const queryResult = await this.adaptee.listItemFromDb(
+ table,
+ listClause,
+ 1
+ );
+ const [record] = queryResult.result || [];
+ let primaryRecord: PrimaryRecord;
+ if (record) {
+ const [voteState] = Object.entries(PrimaryRecordVoteState)
+ .filter(([, value]) => {
+ return record.voteState === value;
+ })
+ .map(([, v]) => v);
+ const voteTimedOut =
+ voteState !== PrimaryRecordVoteState.Done &&
+ Number(record.voteEndTime) < Date.now();
+ primaryRecord = {
+ id: record.id,
+ vmId: record.vmId,
+ ip: record.ip,
+ scalingGroupName: record.scalingGroupName,
+ virtualNetworkId: record.virtualNetworkId,
+ subnetId: record.subnetId,
+ voteEndTime: Number(record.voteEndTime),
+ voteState: (voteTimedOut && PrimaryRecordVoteState.Timeout) || voteState
+ };
+ }
+
+ this.proxy.logAsInfo('called getPrimaryRecord');
+ return primaryRecord;
+ }
+    /**
+     * The implementation for a comparison method for VirtualMachine class objects.
+     * @param {VirtualMachine} vmA vm A to compare with
+     * @param {VirtualMachine} vmB vm B to compare with
+     * @returns {boolean} true if only they are deemed 'the same'.
+     */
+    vmEquals(vmA?: VirtualMachine, vmB?: VirtualMachine): boolean {
+        if (!vmA || !vmB) {
+            return false;
+        }
+        // two vms are 'the same' when every identifying property matches strictly
+        const identityKeys = [
+            'id',
+            'scalingGroupName',
+            'primaryPrivateIpAddress',
+            'virtualNetworkId',
+            'subnetId'
+        ];
+        return identityKeys.every(key => vmA[key] === vmB[key]);
+    }
+    /**
+     * Insert an Autoscale health check record (insert-only, no overwrite).
+     * @param {HealthCheckRecord} rec the health check record to save
+     * @returns {Promise<void>} void
+     */
+    async createHealthCheckRecord(rec: HealthCheckRecord): Promise<void> {
+        this.proxy.logAsInfo('calling createHealthCheckRecord');
+        const table = new AzureAutoscale();
+        // map the HealthCheckSyncState enum member to its stored string form
+        const [syncStateString] = Object.entries(HealthCheckSyncState)
+            .filter(([, value]) => {
+                return rec.syncState === value;
+            })
+            .map(([, v]) => v);
+        const item = table.downcast({
+            vmId: rec.vmId,
+            scalingGroupName: rec.scalingGroupName,
+            ip: rec.ip,
+            primaryIp: rec.primaryIp,
+            heartBeatInterval: rec.heartbeatInterval,
+            heartBeatLossCount: rec.heartbeatLossCount,
+            nextHeartBeatTime: rec.nextHeartbeatTime,
+            syncState: syncStateString,
+            syncRecoveryCount: rec.syncRecoveryCount,
+            seq: rec.seq,
+            sendTime: rec.sendTime,
+            deviceSyncTime: rec.deviceSyncTime,
+            deviceSyncFailTime: rec.deviceSyncFailTime,
+            // store boolean | null as 'true' | 'false' | 'null'
+            deviceSyncStatus:
+                rec.deviceSyncStatus === null ? 'null' : rec.deviceSyncStatus ? 'true' : 'false',
+            // store boolean | null as 'true' | 'false' | 'null'
+            deviceIsPrimary:
+                rec.deviceIsPrimary === null ? 'null' : rec.deviceIsPrimary ? 'true' : 'false',
+            deviceChecksum: rec.deviceChecksum
+        });
+        // NOTE: when create a db record, do not need to check data consistency.
+        await this.adaptee.saveItemToDb(
+            table,
+            item,
+            DBDef.SaveCondition.InsertOnly,
+            false
+        );
+        this.proxy.logAsInfo('called createHealthCheckRecord');
+    }
+    /**
+     * Update an existing Autoscale health check record (upsert with a consistency check).
+     * @param {HealthCheckRecord} rec record to update
+     * @returns {Promise<void>} void
+     */
+    async updateHealthCheckRecord(rec: HealthCheckRecord): Promise<void> {
+        this.proxy.logAsInfo('calling updateHealthCheckRecord');
+        const table = new AzureAutoscale();
+        // map the HealthCheckSyncState enum member to its stored string form
+        const [syncStateString] = Object.entries(HealthCheckSyncState)
+            .filter(([, value]) => {
+                return rec.syncState === value;
+            })
+            .map(([, v]) => v);
+        // encode boolean | null as 'true' | 'false' | 'null'
+        let deviceSyncStatus: string;
+        if (rec.deviceSyncStatus === null) {
+            deviceSyncStatus = 'null';
+        } else if (rec.deviceSyncStatus) {
+            deviceSyncStatus = 'true';
+        } else {
+            deviceSyncStatus = 'false';
+        }
+        let deviceIsPrimary: string;
+        if (rec.deviceIsPrimary === null) {
+            deviceIsPrimary = 'null';
+        } else if (rec.deviceIsPrimary) {
+            deviceIsPrimary = 'true';
+        } else {
+            deviceIsPrimary = 'false';
+        }
+        const item = table.downcast({
+            vmId: rec.vmId,
+            scalingGroupName: rec.scalingGroupName,
+            ip: rec.ip,
+            primaryIp: rec.primaryIp,
+            heartBeatInterval: rec.heartbeatInterval,
+            heartBeatLossCount: rec.heartbeatLossCount,
+            nextHeartBeatTime: rec.nextHeartbeatTime,
+            syncState: syncStateString,
+            syncRecoveryCount: rec.syncRecoveryCount,
+            seq: rec.seq,
+            sendTime: rec.sendTime,
+            deviceSyncTime: rec.deviceSyncTime,
+            deviceSyncFailTime: rec.deviceSyncFailTime,
+            // store boolean | null
+            deviceSyncStatus: deviceSyncStatus,
+            // store boolean | null
+            deviceIsPrimary: deviceIsPrimary,
+            deviceChecksum: rec.deviceChecksum
+        });
+
+        // consistency checker run against the db snapshot right before saving
+        const checker = (
+            snapshot: typeof item
+        ): Promise<{ result: boolean; errorMessage: string }> => {
+            const propToCheck = {
+                vmId: rec.vmId,
+                scalingGroupName: rec.scalingGroupName
+            };
+            const difference = this.dataDiff(propToCheck, snapshot);
+            // NOTE: strictly update the record when the sequence to update is equal to or greater
+            // than the seq in the db to ensure data not to fall back to old value in race conditions
+            let noError = true;
+            let errorMessage = '';
+            if (Object.keys(difference).length) {
+                noError = false;
+                errorMessage = Object.keys(difference)
+                    .map(k => {
+                        return `key: ${k}, expected: ${propToCheck[k]}, existing: ${difference[k]};`;
+                    })
+                    .join(' ');
+            }
+            // NOTE: allow sequence smaller than the stored one only if the sendTime is older
+            // the reason is if the device is rebooted, the seq value is reset to 0, then the seq
+            // becomes smaller than the stored one. However, the sendTime is still greater
+            // Therefore, do not allow smaller seq while sendTime is also smaller. This is the case
+            // when the request is out-of-date.
+            if (rec.seq < snapshot.seq && rec.sendTime <= snapshot.sendTime) {
+                noError = false;
+                errorMessage =
+                    `The seq (${rec.seq}) and send time (${rec.sendTime}) indicate that ` +
+                    `this request is out-of-date. values in db (seq: ${snapshot.seq}, ` +
+                    `send time: ${snapshot.sendTime}). ${errorMessage}`;
+            }
+            return Promise.resolve({
+                result: noError,
+                errorMessage: errorMessage
+            });
+        };
+        await this.adaptee.saveItemToDb(
+            table,
+            item,
+            DBDef.SaveCondition.Upsert,
+            checker
+        );
+        this.proxy.logAsInfo('called updateHealthCheckRecord');
+    }
+    /**
+     * Delete an Autoscale health check record.
+     * @param {HealthCheckRecord} rec the health check record to delete
+     * @returns {Promise<void>} void
+     */
+    async deleteHealthCheckRecord(rec: HealthCheckRecord): Promise<void> {
+        this.proxy.logAsInfo('calling deleteHealthCheckRecord');
+        const table = new AzureAutoscale();
+        // map the HealthCheckSyncState enum member to its stored string form
+        const [syncStateString] = Object.entries(HealthCheckSyncState)
+            .filter(([, value]) => {
+                return rec.syncState === value;
+            })
+            .map(([, v]) => v);
+        // encode boolean | null as 'true' | 'false' | 'null'
+        let deviceSyncStatus: string;
+        if (rec.deviceSyncStatus === null) {
+            deviceSyncStatus = 'null';
+        } else if (rec.deviceSyncStatus) {
+            deviceSyncStatus = 'true';
+        } else {
+            deviceSyncStatus = 'false';
+        }
+        let deviceIsPrimary: string;
+        if (rec.deviceIsPrimary === null) {
+            deviceIsPrimary = 'null';
+        } else if (rec.deviceIsPrimary) {
+            deviceIsPrimary = 'true';
+        } else {
+            deviceIsPrimary = 'false';
+        }
+        const item = table.downcast({
+            vmId: rec.vmId,
+            scalingGroupName: rec.scalingGroupName,
+            ip: rec.ip,
+            primaryIp: rec.primaryIp,
+            heartBeatInterval: rec.heartbeatInterval,
+            heartBeatLossCount: rec.heartbeatLossCount,
+            nextHeartBeatTime: rec.nextHeartbeatTime,
+            syncState: syncStateString,
+            syncRecoveryCount: rec.syncRecoveryCount,
+            seq: rec.seq,
+            sendTime: rec.sendTime,
+            deviceSyncTime: rec.deviceSyncTime,
+            deviceSyncFailTime: rec.deviceSyncFailTime,
+            // store boolean | null
+            deviceSyncStatus: deviceSyncStatus,
+            // store boolean | null
+            deviceIsPrimary: deviceIsPrimary,
+            deviceChecksum: rec.deviceChecksum
+        });
+
+        await this.adaptee.deleteItemFromDb(table, item);
+        this.proxy.logAsInfo('called deleteHealthCheckRecord');
+    }
+    /**
+     * Insert a primary record, not overwrite one with the same primary key.
+     * Can also optionally replace an existing one with a given primary key value.
+     * @param {PrimaryRecord} rec primary record to insert
+     * @param {PrimaryRecord} oldRec existing primary record to replace
+     * @returns {Promise<void>} void
+     * @throws rethrows db errors from the purge or the insert
+     */
+    async createPrimaryRecord(rec: PrimaryRecord, oldRec: PrimaryRecord): Promise<void> {
+        this.proxy.logAsInfo('calling createPrimaryRecord.');
+        const table = new AzurePrimaryElection();
+        const item = table.downcast({
+            id: rec.id,
+            scalingGroupName: rec.scalingGroupName,
+            ip: rec.ip,
+            vmId: rec.vmId,
+            virtualNetworkId: rec.virtualNetworkId,
+            subnetId: rec.subnetId,
+            voteEndTime: rec.voteEndTime,
+            voteState: rec.voteState
+        });
+        // save record only if record for a certain scaling group name not exists, or
+        // if it exists but timeout.
+        // if specified an old rec to purge, use a strict conditional expression to replace.
+        try {
+            if (oldRec) {
+                this.proxy.logAsInfo(
+                    `purging existing record (id: ${oldRec.id}, ` +
+                        `scalingGroup: ${oldRec.scalingGroupName}, vmId: ${oldRec.vmId})`
+                );
+                const itemToDelete = table.downcast({ ...oldRec });
+                // NOTE: the voteState in the db record will be either 'pending' or 'done'.
+                // As soon as the voteState is still 'pending', and voteEndTime has expired,
+                // the primary record is deemed timeout. Therefore, the 'timeout' state will
+                // not need to be updated on the db record. Should alter it to 'pending' when
+                // deleting.
+                if (itemToDelete.voteState === PrimaryRecordVoteState.Timeout) {
+                    itemToDelete.voteState = PrimaryRecordVoteState.Pending;
+                }
+                // NOTE: if the new and old records are for the same primary vm, and the
+                // old record indicates that it has timed out, do not need
+                // to check data consistency.
+                const consistencyCheckRequired = !(
+                    rec.id === oldRec.id && oldRec.voteState === PrimaryRecordVoteState.Timeout
+                );
+                await this.adaptee.deleteItemFromDb(
+                    table,
+                    itemToDelete,
+                    consistencyCheckRequired
+                );
+            }
+        } catch (error) {
+            this.proxy.logForError('DB error.', error);
+            if (error instanceof DBDef.DbDeleteError) {
+                this.proxy.logAsError(`Cannot purge old primary record (id: ${oldRec.id})`);
+            }
+            throw error;
+        }
+        try {
+            // save the new record
+            await this.adaptee.saveItemToDb(
+                table,
+                item,
+                DBDef.SaveCondition.InsertOnly // ASSERT: if record exists, will throw error
+            );
+        } catch (error) {
+            this.proxy.logForError('DB error.', error);
+            if (
+                error instanceof DBDef.DbSaveError &&
+                error.code === DBDef.DbErrorCode.KeyConflict
+            ) {
+                this.proxy.logAsError(`Primary record already exists (id: ${item.id})`);
+            }
+            this.proxy.logAsInfo('called createPrimaryRecord.');
+            throw error;
+        }
+        // log the 'called' marker exactly once on the success path (it was previously
+        // logged twice: once inside the try block and once here)
+        this.proxy.logAsInfo('called createPrimaryRecord.');
+    }
+    /**
+     * Insert a new primary record or update it only when the primary key is the same.
+     * @param {PrimaryRecord} rec primary record to update
+     * @returns {Promise<void>} void
+     * @throws Error when the existing record does not match rec, the vote is not
+     * pending anymore, or the vote has ended
+     */
+    async updatePrimaryRecord(rec: PrimaryRecord): Promise<void> {
+        this.proxy.logAsInfo('calling updatePrimaryRecord.');
+        const table = new AzurePrimaryElection();
+        const item = table.downcast({
+            id: rec.id,
+            scalingGroupName: rec.scalingGroupName,
+            ip: rec.ip,
+            vmId: rec.vmId,
+            virtualNetworkId: rec.virtualNetworkId,
+            subnetId: rec.subnetId,
+            voteEndTime: rec.voteEndTime,
+            voteState: rec.voteState
+        });
+        // save record only if the keys in rec match the keys in db
+        // save record only when the elected primary match the record
+        // and vote state is still pending and the voting not end yet
+        let existingRec: typeof item;
+        try {
+            existingRec = await this.adaptee.getItemFromDb(table, [
+                {
+                    key: table.primaryKey.name,
+                    value: String(rec[table.primaryKey.name])
+                }
+            ]);
+        } catch (error) {
+            this.proxy.logAsInfo(`Primary record (id: ${rec.id}) not found. Will create one.`);
+        }
+        // if primary record already exists,
+        // save record only if the keys in rec match the keys in db
+        if (existingRec) {
+            if (rec.scalingGroupName !== existingRec.scalingGroupName) {
+                throw new Error(
+                    'Primary record value not match on attribute: scalingGroupName.' +
+                        ` Expected: ${rec.scalingGroupName}, found: ${existingRec.scalingGroupName}`
+                );
+            } else if (rec.id !== existingRec.id) {
+                // BUGFIX: this previously compared existingRec.id with itself, which is
+                // always equal, so the id mismatch check never fired.
+                throw new Error(
+                    'Primary record value not match on attribute: id.' +
+                        ` Expected: ${rec.id}, found: ${existingRec.id}`
+                );
+            } else if (existingRec.voteState !== PrimaryRecordVoteState.Pending) {
+                throw new Error(
+                    'Primary record vote state not match.' +
+                        ` Expected: ${PrimaryRecordVoteState.Pending}, found: ${existingRec.voteState}`
+                );
+            } else if (existingRec.voteEndTime <= Date.now()) {
+                throw new Error(
+                    `Primary record vote ended (at ${existingRec.voteEndTime}) already.` +
+                        ` It's ${Date.now()} now.`
+                );
+            }
+        }
+
+        // consistency checker run against the db snapshot right before saving
+        const checker = (
+            snapshot: typeof item
+        ): Promise<{ result: boolean; errorMessage: string }> => {
+            const propToCheck = {
+                id: item.id,
+                scalingGroupName: item.scalingGroupName
+            };
+            const difference = this.dataDiff(propToCheck, snapshot);
+            return Promise.resolve({
+                result: Object.keys(difference).length === 0,
+                errorMessage:
+                    Object.keys(difference)
+                        .map(k => {
+                            return `key: ${k}, expected: ${propToCheck[k]}, existing: ${difference[k]};`;
+                        })
+                        .join(' ') || ''
+            });
+        };
+
+        // upsert
+        await this.adaptee.saveItemToDb(
+            table,
+            item,
+            DBDef.SaveCondition.Upsert,
+            checker
+        );
+        this.proxy.logAsInfo('called updatePrimaryRecord.');
+    }
+
+    /**
+     * Delete a primary record.
+     * @param {PrimaryRecord} rec the primary record to delete
+     * @param {boolean} fullMatch whether the db item must fully match rec to be deleted
+     * @returns {Promise<void>} void
+     */
+    async deletePrimaryRecord(rec: PrimaryRecord, fullMatch?: boolean): Promise<void> {
+        // BUGFIX: the entry/exit logs previously said 'updatePrimaryRecord'
+        this.proxy.logAsInfo('calling deletePrimaryRecord.');
+        const table = new AzurePrimaryElection();
+        const item = table.downcast({
+            id: rec.id,
+            scalingGroupName: rec.scalingGroupName,
+            ip: rec.ip,
+            vmId: rec.vmId,
+            virtualNetworkId: rec.virtualNetworkId,
+            subnetId: rec.subnetId,
+            voteEndTime: rec.voteEndTime,
+            voteState: rec.voteState
+        });
+
+        await this.adaptee.deleteItemFromDb(table, item, fullMatch);
+        this.proxy.logAsInfo('called deletePrimaryRecord.');
+    }
+ /**
+ * Load a configset file from blob storage
+ * The blob container will use the AssetStorageContainer or CustomAssetContainer,
+ * and the location prefix will use AssetStorageDirectory or CustomAssetDirectory.
+ * The full file path will be: \/\/configset/\
+ * @param {string} name the configset name
+ * @param {boolean} custom (optional) whether load it from a custom location or not
+ * @returns {Promise} the configset content as a string
+ */
+ async loadConfigSet(name: string, custom?: boolean): Promise {
+ this.proxy.logAsInfo(`loading${custom ? ' (custom)' : ''} configset: ${name}`);
+ const container = custom
+ ? this.settings.get(AzureFortiGateAutoscaleSetting.CustomAssetContainer)
+ : this.settings.get(AzureFortiGateAutoscaleSetting.AssetStorageContainer);
+ const keyPrefix = custom
+ ? this.settings.get(AzureFortiGateAutoscaleSetting.CustomAssetDirectory)
+ : this.settings.get(AzureFortiGateAutoscaleSetting.AssetStorageDirectory);
+ if (!(container && container.value)) {
+ throw new Error('Missing setting item for: storage container for configset.');
+ }
+
+ const filePath = path.posix.join(...[keyPrefix.value, 'configset', name].filter(k => !!k));
+ this.proxy.logAsDebug(
+ `Load blob in container [${container.value}], path:` + `[${filePath}]`
+ );
+ const content = await this.adaptee.getBlobContent(container.value, filePath);
+ this.proxy.logAsInfo('configset loaded.');
+ return content;
+ }
+ /**
+ * List all configset files in a specified blob container location
+ * The blob container will use the AssetStorageContainer or CustomAssetContainer,
+ * and the location prefix will use AssetStorageDirectory or CustomAssetDirectory.
+ * There will be an optional subDirectory provided as parameter.
+ * The full file path will be: \/\[/\]/configset
+ * @param {string} subDirectory additional subdirectory
+ * @param {boolean} custom (optional) whether load it from a custom location or not
+ * @returns {Promise} the configset content as a string
+ */
+ async listConfigSet(subDirectory?: string, custom?: boolean): Promise {
+ this.proxy.logAsInfo('calling listConfigSet');
+ // it will load configsets from the location:
+ // in custom mode: /CustomAssetContainer/CustomAssetDirectory[/subDirectory]/configset/
+ // in normal mode: /AssetStorageContainer/AssetStorageDirectory[/subDirectory]/configset/
+ const container = custom
+ ? this.settings.get(AzureFortiGateAutoscaleSetting.CustomAssetContainer)
+ : this.settings.get(AzureFortiGateAutoscaleSetting.AssetStorageContainer);
+
+ const directory = custom
+ ? this.settings.get(AzureFortiGateAutoscaleSetting.CustomAssetDirectory)
+ : this.settings.get(AzureFortiGateAutoscaleSetting.AssetStorageDirectory);
+ let blobs: Blob[] = [];
+ if (!container.value) {
+ this.proxy.logAsInfo('No container is specified. No configset loaded.');
+ return [];
+ }
+
+ const location = path.posix.join(
+ ...[directory.value, subDirectory || null, 'configset'].filter(r => !!r)
+ );
+
+ try {
+ this.proxy.logAsInfo(
+ `List configset in container: ${container.value}, directory: ${location}`
+ );
+ blobs = await this.adaptee.listBlob(container.value, location);
+ } catch (error) {
+ this.proxy.logAsWarning(error);
+ }
+ this.proxy.logAsInfo('called listConfigSet');
+ return blobs;
+ }
+ async deleteVmFromScalingGroup(vmId: string): Promise {
+ this.proxy.logAsInfo('calling deleteVmFromScalingGroup');
+ try {
+ const vms = await this.listAutoscaleVm();
+ const [vm] = vms.filter(v => v.id === vmId) || [];
+ if (!vm) {
+ this.proxy.logAsWarning(`vm (id: ${vmId}) not found. skip deleting it.`);
+ } else {
+ const scalingGroupName = vm.scalingGroupName;
+ const success = await this.adaptee.deleteInstanceFromVmss(
+ scalingGroupName,
+ Number(vm.sourceData.instanceId)
+ );
+ if (success) {
+ this.proxy.logAsInfo(`delete completed. vm (id: ${vmId}) is deleted.`);
+ } else {
+ this.proxy.logAsWarning(`delete completed. vm (id: ${vmId}) not found.)`);
+ }
+ }
+ } catch (error) {
+ this.proxy.logForError('Failed to delele vm from scaling group.', error);
+ }
+ this.proxy.logAsInfo('called deleteVmFromScalingGroup');
+ }
+ async listLicenseFiles(
+ storageContainerName: string,
+ licenseDirectoryName: string
+ ): Promise {
+ this.proxy.logAsInfo('calling listLicenseFiles');
+ const blobs: Blob[] = await this.adaptee.listBlob(
+ storageContainerName,
+ licenseDirectoryName
+ );
+ this.proxy.logAsInfo(
+ storageContainerName,
+ licenseDirectoryName,
+ `file count: ${blobs.length}`
+ );
+ this.proxy.logAsDebug('blobs:', JSON.stringify(blobs));
+ const licenseFiles = await Promise.all(
+ blobs.map(async blob => {
+ const filePath = path.posix.join(licenseDirectoryName, blob.fileName);
+ const content = await this.adaptee.getBlobContent(storageContainerName, filePath);
+ const algorithm = 'sha256';
+ const licenseFile: LicenseFile = {
+ fileName: blob.fileName,
+ checksum: genChecksum(content, algorithm),
+ algorithm: algorithm,
+ content: content
+ };
+ return licenseFile;
+ })
+ );
+ this.proxy.logAsInfo('calling listLicenseFiles');
+ return licenseFiles;
+ }
+ async listLicenseStock(productName: string): Promise {
+ this.proxy.logAsInfo('calling listLicenseStock');
+ const table = new AzureLicenseStock();
+ const queryResult = await this.adaptee.listItemFromDb(table);
+ const dbItems = queryResult.result || [];
+ const mapItems = dbItems
+ .filter(item => item.productName === productName)
+ .map(item => {
+ return {
+ fileName: item.fileName,
+ checksum: item.checksum,
+ algorithm: item.algorithm,
+ productName: item.productName
+ } as LicenseStockRecord;
+ });
+ this.proxy.logAsInfo('called listLicenseStock');
+ return mapItems;
+ }
+ async listLicenseUsage(productName: string): Promise {
+ this.proxy.logAsInfo('calling listLicenseUsage');
+ const table = new AzureLicenseUsage();
+ const queryResult = await this.adaptee.listItemFromDb(table);
+ const dbItems = queryResult.result || [];
+ const mapItems = dbItems
+ .filter(item => item.productName === productName)
+ .map(item => {
+ return {
+ fileName: item.fileName,
+ checksum: item.checksum,
+ algorithm: item.algorithm,
+ productName: item.productName,
+ vmId: item.vmId,
+ scalingGroupName: item.scalingGroupName,
+ assignedTime: item.assignedTime,
+ vmInSync: item.vmInSync
+ } as LicenseUsageRecord;
+ });
+ this.proxy.logAsInfo('called listLicenseUsage');
+ return mapItems;
+ }
+    /**
+     * Synchronize the license stock table with the given records: upsert records that
+     * correspond to a license file and delete stale db records that no longer do.
+     * @param {LicenseStockRecord[]} records the authoritative stock records
+     * @returns {Promise<void>} void
+     * @throws Error when one or more save/delete operations failed
+     */
+    async updateLicenseStock(records: LicenseStockRecord[]): Promise<void> {
+        this.proxy.logAsInfo('calling updateLicenseStock');
+        const table = new AzureLicenseStock();
+        const queryResult = await this.adaptee.listItemFromDb(table);
+        const dbItems = queryResult.result || [];
+        // load all license stock records in the db, keyed by checksum
+        const items = new Map(
+            dbItems.map(item => {
+                return [item.checksum, item];
+            })
+        );
+        let errorCount = 0;
+        // checksums remaining in this list after the loop have no matching record
+        const stockRecordChecksums = Array.from(items.keys());
+        await Promise.all(
+            // read the content of each license file
+            records.map(record => {
+                const item = table.downcast({
+                    checksum: record.checksum,
+                    algorithm: record.algorithm,
+                    fileName: record.fileName,
+                    productName: record.productName
+                });
+                let typeText: string;
+                let saveCondition: DBDef.SaveCondition;
+                // record exists, update it
+                if (items.has(record.checksum)) {
+                    stockRecordChecksums.splice(stockRecordChecksums.indexOf(record.checksum), 1);
+                    saveCondition = DBDef.SaveCondition.UpdateOnly;
+                    typeText =
+                        `update existing item (filename: ${record.fileName},` +
+                        ` checksum: ${record.checksum})`;
+                } else {
+                    saveCondition = DBDef.SaveCondition.Upsert;
+                    typeText =
+                        `create new item (filename: ${record.fileName},` +
+                        ` checksum: ${record.checksum})`;
+                }
+                // collect failures instead of failing fast so every record is attempted
+                return this.adaptee
+                    .saveItemToDb(table, item, saveCondition, false)
+                    .catch(err => {
+                        this.proxy.logForError(`Failed to ${typeText}.`, err);
+                        errorCount++;
+                    });
+            })
+        );
+        // remove those records which don't have a corresponding license file.
+        await Promise.all(
+            stockRecordChecksums.map(checksum => {
+                const item = items.get(checksum);
+                return this.adaptee
+                    .deleteItemFromDb(table, item)
+                    .catch(err => {
+                        this.proxy.logForError(
+                            `Failed to delete item (filename: ${item.fileName}) from db.`,
+                            err
+                        );
+                        errorCount++;
+                    });
+            })
+        );
+        if (errorCount > 0) {
+            this.proxy.logAsInfo('called updateLicenseStock');
+            throw new Error('updateLicenseStock unsuccessfully.');
+        }
+        this.proxy.logAsInfo('called updateLicenseStock');
+    }
+    /**
+     * Update license usage records: update an existing record only against a matching
+     * reference snapshot (to detect races), or insert a new record when none exists.
+     * @param {{item: LicenseUsageRecord, reference: LicenseUsageRecord}[]} records
+     * records to apply; `reference` is the expected current db state for updates
+     * @returns {Promise<void>} void
+     * @throws Error when one or more record operations failed
+     */
+    async updateLicenseUsage(
+        records: { item: LicenseUsageRecord; reference: LicenseUsageRecord }[]
+    ): Promise<void> {
+        this.proxy.logAsInfo('calling updateLicenseUsage');
+        const table = new AzureLicenseUsage();
+        // get all records from the db as a snapshot
+        const queryResult = await this.adaptee.listItemFromDb(table);
+        const dbItems = queryResult.result || [];
+        const items = new Map(
+            dbItems.map(item => {
+                return [item.checksum, item];
+            })
+        );
+        let errorCount = 0;
+        await Promise.all(
+            records.map(rec => {
+                const item = table.downcast({
+                    checksum: rec.item.checksum,
+                    algorithm: rec.item.algorithm,
+                    fileName: rec.item.fileName,
+                    productName: rec.item.productName,
+                    vmId: rec.item.vmId,
+                    scalingGroupName: rec.item.scalingGroupName,
+                    assignedTime: rec.item.assignedTime,
+                    vmInSync: rec.item.vmInSync
+                });
+                let typeText: string;
+                let saveCondition: DBDef.SaveCondition;
+                // update if record exists
+                // NOTE: for updating an existing record, it requires a reference of the existing
+                // record as a snapshot of db data. Only when the record data at the time of updating
+                // matches exactly the same as the snapshot, the update succeeds. Otherwise, the
+                // record is considered changed, and inconsistent anymore, thus not allowing updating.
+                if (items.has(rec.item.checksum)) {
+                    // ASSERT: it must have a referenced record to replace; otherwise, it should fail
+                    if (!rec.reference) {
+                        typeText = `update existing item (checksum: ${rec.item.checksum}). `;
+                        this.proxy.logAsError(
+                            `Failed to ${typeText}. No referenced record specified.`
+                        );
+                        errorCount++;
+                        return Promise.resolve();
+                    }
+                    saveCondition = DBDef.SaveCondition.UpdateOnly;
+
+                    // the checker compares the live db snapshot against the reference
+                    const checker = (
+                        snapshot: typeof item
+                    ): Promise<{ result: boolean; errorMessage: string }> => {
+                        const propToCheck = {
+                            vmId: rec.reference.vmId,
+                            scalingGroupName: rec.reference.scalingGroupName,
+                            productName: rec.reference.productName,
+                            algorithm: rec.reference.algorithm
+                        };
+                        const difference = this.dataDiff(propToCheck, snapshot);
+                        return Promise.resolve({
+                            result: Object.keys(difference).length === 0,
+                            errorMessage:
+                                Object.keys(difference)
+                                    .map(k => {
+                                        return `key: ${k}, expected: ${propToCheck[k]}, existing: ${difference[k]};`;
+                                    })
+                                    .join(' ') || ''
+                        });
+                    };
+                    typeText =
+                        `update existing item (checksum: ${rec.reference.checksum}). ` +
+                        `Old values (filename: ${rec.reference.fileName}, ` +
+                        `vmId: ${rec.reference.vmId}, ` +
+                        `scalingGroupName: ${rec.reference.scalingGroupName}, ` +
+                        `productName: ${rec.reference.productName}, ` +
+                        `algorithm: ${rec.reference.algorithm}, ` +
+                        `assignedTime: ${rec.reference.assignedTime}).` +
+                        `New values (filename: ${item.fileName}, vmId: ${item.vmId}, ` +
+                        `scalingGroupName: ${item.scalingGroupName}, ` +
+                        `productName: ${item.productName}, algorithm: ${item.algorithm})`;
+                    // NOTE: must ensure the consistency because the updating of the usage record
+                    // is expected to happen with a race condition.
+                    return this.adaptee
+                        .saveItemToDb(table, item, saveCondition, checker)
+                        .then(() => {
+                            this.proxy.logAsInfo(typeText);
+                        })
+                        .catch(err => {
+                            this.proxy.logForError(`Failed to ${typeText}.`, err);
+                            errorCount++;
+                        });
+                }
+                // create if record not exists
+                else {
+                    saveCondition = DBDef.SaveCondition.InsertOnly;
+                    typeText =
+                        `create new item (checksum: ${item.checksum})` +
+                        `New values (filename: ${item.fileName}, vmId: ${item.vmId}, ` +
+                        `scalingGroupName: ${item.scalingGroupName}, ` +
+                        `productName: ${item.productName}, algorithm: ${item.algorithm})`;
+                    return this.adaptee
+                        .saveItemToDb(table, item, saveCondition, false)
+                        .then(() => {
+                            this.proxy.logAsInfo(typeText);
+                        })
+                        .catch(err => {
+                            this.proxy.logForError(`Failed to ${typeText}.`, err);
+                            errorCount++;
+                        });
+                }
+            })
+        );
+        if (errorCount > 0) {
+            this.proxy.logAsInfo('called updateLicenseUsage');
+            throw new Error(
+                `${errorCount} license usage record error occured. Please find the detailed logs above.`
+            );
+        }
+        this.proxy.logAsInfo('called updateLicenseUsage');
+    }
+ async loadLicenseFileContent(storageContainerName: string, filePath: string): Promise {
+ this.proxy.logAsInfo('calling loadLicenseFileContent');
+ const content = await this.adaptee.getBlobContent(storageContainerName, filePath);
+ this.proxy.logAsInfo('called loadLicenseFileContent');
+ return content;
+ }
    // TODO: unused function as of this time
    // Placeholder: part of the platform adapter contract; not used on Azure, so it
    // always resolves with an empty array.
    // NOTE(review): the Promise type argument (the record array element type) was lost
    // in this copy — restore it from the platform adapter interface.
    listNicAttachmentRecord(): Promise {
        this.proxy.logAsInfo('calling listNicAttachmentRecord');
        this.proxy.logAsInfo('this method is unused thus always returning an empty array.');
        this.proxy.logAsInfo('called listNicAttachmentRecord');
        return Promise.resolve([]);
    }
+ // TODO: unused function as of this time
+ updateNicAttachmentRecord(vmId: string, nicId: string, status: string): Promise {
+ this.proxy.logAsInfo('calling updateNicAttachmentRecord');
+ this.proxy.logAsInfo(
+ 'this method is unused. parameter values passed here are:' +
+ ` vmId:${vmId}, nicId: ${nicId}, status: ${status}`
+ );
+ this.proxy.logAsInfo('called updateNicAttachmentRecord');
+ return Promise.resolve();
+ }
+ // TODO: unused function as of this time
+ deleteNicAttachmentRecord(vmId: string, nicId: string): Promise {
+ this.proxy.logAsInfo('calling deleteNicAttachmentRecord');
+ this.proxy.logAsInfo(
+ 'this method is unused. parameter values passed here are:' +
+ ` vmId:${vmId}, nicId: ${nicId}`
+ );
+ this.proxy.logAsInfo('called deleteNicAttachmentRecord');
+ return Promise.resolve();
+ }
    // TODO: unused function as of this time
    // Placeholder: logs the (unused) parameters and always resolves with null.
    // NOTE(review): the Promise type argument (a nullable network interface type) was
    // lost in this copy — restore it from the platform adapter interface.
    createNetworkInterface(
        subnetId?: string,
        description?: string,
        securityGroups?: string[],
        privateIpAddress?: string
    ): Promise {
        this.proxy.logAsInfo('calling createNetworkInterface');
        this.proxy.logAsInfo(
            'this method is unused thus always returning null. parameter values passed here are:' +
                ` subnetId?:${subnetId}, description?: ${description}` +
                `, securityGroups?: ${securityGroups}, privateIpAddress?: ${privateIpAddress}`
        );
        this.proxy.logAsInfo('called createNetworkInterface');
        return Promise.resolve(null);
    }
+ // TODO: unused function as of this time
+ deleteNetworkInterface(nicId: string): Promise {
+ this.proxy.logAsInfo('calling deleteNetworkInterface');
+ this.proxy.logAsInfo(
+ 'this method is unused. parameter values passed here are:' + ` nicId: ${nicId}`
+ );
+ this.proxy.logAsInfo('called deleteNetworkInterface');
+ return Promise.resolve();
+ }
+ // TODO: unused function as of this time
+ attachNetworkInterface(vmId: string, nicId: string, index?: number): Promise {
+ this.proxy.logAsInfo('calling attachNetworkInterface');
+ this.proxy.logAsInfo(
+ 'this method is unused. parameter values passed here are:' +
+ ` vmId:${vmId}, nicId: ${nicId}, index: ${index}`
+ );
+ this.proxy.logAsInfo('called attachNetworkInterface');
+ return Promise.resolve();
+ }
+ // TODO: unused function as of this time
+ detachNetworkInterface(vmId: string, nicId: string): Promise {
+ this.proxy.logAsInfo('calling detachNetworkInterface');
+ this.proxy.logAsInfo(
+ 'this method is unused. parameter values passed here are:' +
+ ` vmId:${vmId}, nicId: ${nicId}`
+ );
+ this.proxy.logAsInfo('called detachNetworkInterface');
+ return Promise.resolve();
+ }
    // TODO: unused function as of this time
    // Placeholder: logs the (unused) parameters and always resolves with an empty array.
    // NOTE(review): the Promise type argument (the interface array element type) was
    // lost in this copy — restore it from the platform adapter interface.
    listNetworkInterfaces(tags: ResourceFilter[], status?: string): Promise {
        this.proxy.logAsInfo('calling listNetworkInterfaces');
        this.proxy.logAsInfo(
            'this method is unused thus always returning an empty array. ' +
                'parameter values passed here are:' +
                ` tags:${JSON.stringify(tags)}, status?: ${status}`
        );
        this.proxy.logAsInfo('called listNetworkInterfaces');
        return Promise.resolve([]);
    }
+ // TODO: unused function as of this time
+ tagNetworkInterface(nicId: string, tags: ResourceFilter[]): Promise {
+ this.proxy.logAsInfo('calling tagNetworkInterface');
+ this.proxy.logAsInfo(
+ 'this method is unused. parameter values passed here are:' +
+ ` nicId: ${nicId}, tags:${JSON.stringify(tags)}`
+ );
+ this.proxy.logAsInfo('called tagNetworkInterface');
+ return Promise.resolve();
+ }
+ async registerFortiAnalyzer(
+ vmId: string,
+ privateIp: string,
+ primary: boolean,
+ vip: string
+ ): Promise {
+ this.proxy.logAsInfo('calling registerFortiAnalyzer');
+ const table = new AzureFortiAnalyzer();
+ const item = table.downcast({
+ vmId: vmId,
+ ip: privateIp,
+ primary: primary,
+ vip: vip
+ });
+ await this.adaptee.saveItemToDb(
+ table,
+ item,
+ DBDef.SaveCondition.Upsert,
+ false
+ );
+ this.proxy.logAsInfo('called registerFortiAnalyzer');
+ }
+
+ async invokeAutoscaleFunction(
+ payload: unknown,
+ functionEndpoint: string,
+ invocable: string,
+ executionTime?: number
+ ): Promise {
+ this.proxy.logAsInfo('calling invokeAutoscaleFunction');
+ const secretKey = this.createAutoscaleFunctionInvocationKey(
+ payload,
+ functionEndpoint,
+ invocable
+ );
+ const p: CloudFunctionInvocationPayload = constructInvocationPayload(
+ payload,
+ invocable,
+ secretKey,
+ executionTime
+ );
+
+ // NOTE: Autoscale leverages Azure Function access keys to ensure security
+ // see: https://docs.microsoft.com/en-us/azure/azure-functions/functions-bindings-http-webhook-trigger?tabs=csharp#authorization-keys
+ const reqHeaders = await this.proxy.getReqHeaders();
+ const reqQueryParams = await this.proxy.getReqQueryParameters();
+ const functionAccessKey =
+ reqHeaders['x-functions-key'] ||
+ reqQueryParams.code ||
+ process.env.FORTIANALYZER_HANDLER_ACCESS_KEY ||
+ null;
+ if (functionAccessKey) {
+ this.proxy.logAsInfo('function access key found. will invoke with access key.');
+ } else {
+ this.proxy.logAsInfo('function access key not found. will invoke as anonymous.');
+ }
+ const response = await this.adaptee.invokeAzureFunction(
+ functionEndpoint,
+ JSON.stringify(p),
+ functionAccessKey && String(functionAccessKey)
+ );
+ this.proxy.logAsInfo(`invocation response status code: ${response.status}`);
+ this.proxy.logAsInfo('called invokeAutoscaleFunction');
+ return response.status;
+ }
+ createAutoscaleFunctionInvocationKey(
+ payload: unknown,
+ functionEndpoint: string,
+ invocable: string
+ ): string {
+ const psk = this.settings.get(AzureFortiGateAutoscaleSetting.FortiGatePskSecret).value;
+ return genChecksum(
+ `${functionEndpoint}:${invocable}:${psk}:${JSON.stringify(payload)}`,
+ 'sha256'
+ );
+ }
+
+ async getSecretFromKeyVault(name: string): Promise {
+ try {
+ const decrypted = await this.adaptee.keyVaultGetSecret(name);
+ this.proxy.logAsInfo('Environment variable is decrypted. Use the decrpted value.');
+ return decrypted;
+ } catch (error) {
+ this.proxy.logAsWarning(
+ 'Unseccessfully decrypt the given varable, probably because ' +
+ 'the input is a non-encrypted value. Use its original value instead.'
+ );
+ throw error;
+ }
+ }
+
+ async saveLogs(logs: LogItem[]): Promise {
+ if (!logs) {
+ return;
+ }
+ let content = '';
+ logs.forEach(log => {
+ const args =
+ (log.arguments &&
+ log.arguments.map((arg, index) => {
+ const prefix = index > 0 ? `arg${index}: ` : '';
+ return `${prefix}${arg}`;
+ })) ||
+ [];
+ content =
+ `${content}` +
+ `${args.join('\n')}\n`;
+ });
+ const table = new AzureCustomLog();
+ const item = table.downcast({
+ id: undefined,
+ timestamp: undefined,
+ logContent: content
+ });
+ const save = (logItem: typeof item): Promise => {
+ const now = Date.now();
+ logItem.id = `${now}-${Math.round(Math.random() * 1000)}`;
+ logItem.timestamp = now;
+ return this.adaptee.saveItemToDb(
+ table,
+ item,
+ DBDef.SaveCondition.InsertOnly
+ );
+ };
+
+ try {
+ let tryAgainWhenFailed = false;
+ await save(item).catch(error => {
+ if (error instanceof DBDef.DbSaveError) {
+ tryAgainWhenFailed = true;
+ } else {
+ throw error;
+ }
+ });
+ if (tryAgainWhenFailed) {
+ await save(item);
+ }
+ } catch (error) {
+ this.proxy.logForError('Error in saving logs to CustomLog', error);
+ }
+ }
+
+ dataDiff(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ dataToCheck: { [key: string]: any },
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ dataAgainst: { [key: string]: any }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ ): { [key: string]: any } {
+ if (!dataAgainst) {
+ return dataToCheck;
+ }
+
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const diff: { [key: string]: any } = {};
+
+ Object.keys(dataToCheck)
+ .filter(k => dataToCheck[k] !== dataAgainst[k])
+ .forEach(k => {
+ diff[k] = dataAgainst[k];
+ });
+ return diff;
+ }
+}
diff --git a/core/azure/azure-routing-egress-traffic-via-primary-vm-strategy.ts b/core/azure/azure-routing-egress-traffic-via-primary-vm-strategy.ts
new file mode 100644
index 0000000..9802914
--- /dev/null
+++ b/core/azure/azure-routing-egress-traffic-via-primary-vm-strategy.ts
@@ -0,0 +1,29 @@
+import { AutoscaleEnvironment, CloudFunctionProxyAdapter, RoutingEgressTrafficStrategy } from '..';
+import { AzurePlatformAdapter } from '.';
+
+/**
+ * This strategy updates the route table associated with the private subnets which need outgoing
+ * traffic capability. It adds/replace the route to the primary FortiGate vm in the Autoscale cluster
+ * so the FortiGate can handle such egress traffic.
+ */
+export class AzureRoutingEgressTrafficViaPrimaryVmStrategy implements RoutingEgressTrafficStrategy {
+ protected platform: AzurePlatformAdapter;
+ protected proxy: CloudFunctionProxyAdapter;
+ protected env: AutoscaleEnvironment;
+ constructor(
+ platform: AzurePlatformAdapter,
+ proxy: CloudFunctionProxyAdapter,
+ env: AutoscaleEnvironment
+ ) {
+ this.platform = platform;
+ this.proxy = proxy;
+ this.env = env;
+ }
+ apply(): Promise {
+ this.proxy.logAsInfo('calling RoutingEgressTrafficViaPrimaryVmStrategy.apply');
+ // TODO: implementation needed.
+ this.proxy.logAsInfo('feature not yet implemented.');
+ this.proxy.logAsInfo('called RoutingEgressTrafficViaPrimaryVmStrategy.apply');
+ return Promise.resolve();
+ }
+}
diff --git a/core/azure/azure-tagging-autoscale-vm-strategy.ts b/core/azure/azure-tagging-autoscale-vm-strategy.ts
new file mode 100644
index 0000000..1ee1ae9
--- /dev/null
+++ b/core/azure/azure-tagging-autoscale-vm-strategy.ts
@@ -0,0 +1,43 @@
+import { CloudFunctionProxyAdapter, TaggingVmStrategy, VmTagging } from '..';
+import { AzurePlatformAdapter } from '.';
+
+export class AzureTaggingAutoscaleVmStrategy implements TaggingVmStrategy {
+ protected platform: AzurePlatformAdapter;
+ protected proxy: CloudFunctionProxyAdapter;
+ protected taggings: VmTagging[];
+ constructor(platform: AzurePlatformAdapter, proxy: CloudFunctionProxyAdapter) {
+ this.platform = platform;
+ this.proxy = proxy;
+ }
+ prepare(taggings: VmTagging[]): Promise {
+ this.taggings = taggings;
+ return Promise.resolve();
+ }
+ async apply(): Promise {
+ this.proxy.logAsInfo('calling AzureTaggingAutoscaleVmStrategy.apply');
+ const creationTaggings: VmTagging[] = this.taggings.filter(tagging => !tagging.clear);
+ const deletionTaggings: VmTagging[] = this.taggings.filter(tagging => tagging.clear);
+ if (creationTaggings.length > 0) {
+ await this.add(creationTaggings);
+ }
+ if (deletionTaggings.length > 0) {
+ await this.clear(deletionTaggings);
+ }
+ this.proxy.logAsInfo('calling AzureTaggingAutoscaleVmStrategy.apply');
+ }
+ add(taggings: VmTagging[]): Promise {
+ this.proxy.logAsInfo('calling AzureTaggingAutoscaleVmStrategy.add');
+ this.proxy.logAsInfo('skipped. not yet implemented.');
+ this.proxy.logAsInfo(`value passed to parameter: taggings: ${JSON.stringify(taggings)}.`);
+ this.proxy.logAsInfo('called AzureTaggingAutoscaleVmStrategy.add');
+ return Promise.resolve();
+ }
+
+ clear(taggings: VmTagging[]): Promise {
+ this.proxy.logAsInfo('calling AzureTaggingAutoscaleVmStrategy.clear');
+ this.proxy.logAsInfo('skipped. not yet implemented.');
+ this.proxy.logAsInfo(`value passed to parameter: taggings: ${JSON.stringify(taggings)}.`);
+ this.proxy.logAsInfo('called AzureTaggingAutoscaleVmStrategy.clear');
+ return Promise.resolve();
+ }
+}
diff --git a/core/azure/index.ts b/core/azure/index.ts
new file mode 100644
index 0000000..90949c1
--- /dev/null
+++ b/core/azure/index.ts
@@ -0,0 +1,17 @@
+import * as AzureFunctionDef from './azure-function-definitions';
+// export @fortinet/fortigate-autoscale
+// export * from '..';
+// export fortigate-autoscale-azure module files.
+export * from './azure-cloud-function-proxy';
+export * from './azure-db-definitions';
+export * from './azure-fortianalyzer-integration-service';
+export * from './azure-fortigate-autoscale';
+export * from './azure-fortigate-autoscale-settings';
+export * from './azure-fortigate-bootstrap-config-strategy';
+export * from './azure-function-invocable';
+export * from './azure-hybrid-scaling-group-strategy';
+export * from './azure-platform-adaptee';
+export * from './azure-platform-adapter';
+export * from './azure-routing-egress-traffic-via-primary-vm-strategy';
+export * from './azure-tagging-autoscale-vm-strategy';
+export { AzureFunctionDef };
diff --git a/core/blob.ts b/core/blob.ts
new file mode 100644
index 0000000..c25e577
--- /dev/null
+++ b/core/blob.ts
@@ -0,0 +1,5 @@
/**
 * A piece of content held in blob storage.
 * NOTE(review): this name shadows the DOM/Node global 'Blob' when imported
 * unqualified — confirm this is intended.
 */
export interface Blob {
    // raw content of the blob, when it has been loaded
    content?: string;
    // full path of the blob within its container
    filePath?: string;
    // file name portion of the path
    fileName?: string;
}
diff --git a/core/cloud-function-peer-invocation.ts b/core/cloud-function-peer-invocation.ts
new file mode 100644
index 0000000..4c77de2
--- /dev/null
+++ b/core/cloud-function-peer-invocation.ts
@@ -0,0 +1,43 @@
+import { JSONable } from './jsonable';
+
/**
 * Wire format for a peer cloud-function invocation. The real payload travels
 * pre-stringified alongside the invocable name and a shared secret key that the
 * callee uses to verify the caller.
 */
export interface CloudFunctionInvocationPayload extends JSONable {
    // JSON.stringify-ed payload data
    stringifiedData: string;
    // name of the invocable routine to run on the callee side
    invocable: string;
    // shared secret (checksum) proving the caller's identity
    invocationSecretKey: string;
    // optional execution time hint carried to the callee
    executionTime?: number;
}
+export interface CloudFunctionPeerInvocation {
+ proxy: TProxy;
+ platform: TPlatform;
+ executeInvocable(payload: CloudFunctionInvocationPayload, invocable: string): Promise;
+ handlePeerInvocation(functionEndpoint: string): Promise;
+}
+
+export class CloudFunctionInvocationTimeOutError extends Error {
+ extendExecution: boolean;
+ constructor(message?: string, extendExecution = false) {
+ super(message);
+ this.extendExecution = extendExecution;
+ }
+}
+
+export function constructInvocationPayload(
+ payload: unknown,
+ invocable: string,
+ secretKey: string,
+ executionTime?: number
+): CloudFunctionInvocationPayload {
+ const p: CloudFunctionInvocationPayload = {
+ stringifiedData: JSON.stringify(payload),
+ invocable: invocable,
+ invocationSecretKey: secretKey,
+ executionTime: executionTime
+ };
+ return p;
+}
+
+export function extractFromInvocationPayload(
+ invocationPayload: CloudFunctionInvocationPayload
+): unknown {
+ return invocationPayload.stringifiedData && JSON.parse(invocationPayload.stringifiedData);
+}
diff --git a/core/cloud-function-proxy.ts b/core/cloud-function-proxy.ts
new file mode 100644
index 0000000..c01ddc6
--- /dev/null
+++ b/core/cloud-function-proxy.ts
@@ -0,0 +1,202 @@
// the no-shadow rule errored in the next line may be just a false alarm
// eslint-disable-next-line no-shadow
/** Severity levels accepted by CloudFunctionProxyAdapter.log. */
export enum LogLevel {
    Log = 'Log',
    Info = 'Info',
    Warn = 'Warn',
    Error = 'Error',
    Debug = 'Debug'
}
// the no-shadow rule errored in the next line may be just a false alarm
// eslint-disable-next-line no-shadow
/**
 * Special sentinel values for logAsDebug's 'message' parameter.
 * DebugOnly suppresses the message entirely outside debug mode (see logAsDebug).
 */
export enum DebugMode {
    True = 'true',
    DebugOnly = 'DebugOnly'
}
// the no-shadow rule errored in the next line may be just a false alarm
// eslint-disable-next-line no-shadow
/** Classification of incoming requests handled by the Autoscale cloud function. */
export enum ReqType {
    BootstrapConfig = 'BootstrapConfig',
    ByolLicense = 'ByolLicense',
    CloudFunctionPeerInvocation = 'PeerFunctionInvocation',
    CustomLog = 'CustomLog',
    HeartbeatSync = 'HeartbeatSync',
    LaunchedVm = 'LaunchedVm',
    LaunchingVm = 'LaunchingVm',
    ServiceProviderRequest = 'ServiceProviderRequest',
    StatusMessage = 'StatusMessage',
    TerminatedVm = 'TerminatedVm',
    TerminatingVm = 'TerminatingVm',
    VmNotLaunched = 'VmNotLaunched'
}
// the no-shadow rule errored in the next line may be just a false alarm
// eslint-disable-next-line no-shadow
/**
 * HTTP request methods.
 * NOTE(review): numeric enum (bidirectionally mapped); members are resolved from
 * verb strings via mapHttpMethod below.
 */
export enum ReqMethod {
    GET,
    POST,
    PUT,
    DELETE,
    PATCH,
    HEAD,
    TRACE,
    OPTIONS,
    CONNECT
}
+
+const reqMethod: Map = new Map([
+ ['GET', ReqMethod.GET],
+ ['POST', ReqMethod.POST],
+ ['PUT', ReqMethod.PUT],
+ ['DELETE', ReqMethod.DELETE],
+ ['PATCH', ReqMethod.PATCH],
+ ['HEAD', ReqMethod.HEAD],
+ ['TRACE', ReqMethod.TRACE],
+ ['OPTIONS', ReqMethod.OPTIONS],
+ ['CONNECT', ReqMethod.CONNECT]
+]);
+
+export function mapHttpMethod(s: string): ReqMethod {
+ return s && reqMethod.get(s.toUpperCase());
+}
+
/** Parsed request body: a free-form key/value bag. */
export interface ReqBody {
    [key: string]: unknown;
}

/** Request headers bag. Header keys are treated case-insensitively (RFC 7540 §8.1.2). */
export interface ReqHeaders {
    [key: string]: unknown;
}
+
/**
 * Body of a cloud function HTTP response.
 * NOTE(review): the union collapses to 'unknown' (unknown absorbs the other
 * members); the members are kept as written since they document the intended shapes.
 */
export type CloudFunctionResponseBody =
    | string
    | {
          [key: string]: unknown;
      }
    | unknown;
+
+export interface CloudFunctionProxyAdapter {
+ formatResponse(
+ httpStatusCode: number,
+ body: CloudFunctionResponseBody,
+ headers: unknown
+ ): unknown;
+ log(message: string, level: LogLevel, ...others: unknown[]): void;
+ logAsDebug(message: string | DebugMode, ...others: unknown[]): void;
+ logAsInfo(message: string, ...others: unknown[]): void;
+ logAsWarning(message: string, ...others: unknown[]): void;
+ logAsError(message: string, ...others: unknown[]): void;
+ /**
+ * Output an Error level message containing the given message prefix, the error.message
+ * and error.stack of the given error.
+ *
+ * @param {string} messagePrefix
+ * @param {Error | string} error
+ * @memberof CloudFunctionProxyAdapter
+ */
+ logForError(messagePrefix: string, error: Error): void;
+ getRequestAsString(): Promise;
+ /**
+ * return the remaining execution time (in millisecond) of the current cloud function process.
+ *
+ * @returns {number}
+ * @memberof CloudFunctionProxyAdapter
+ */
+ getRemainingExecutionTime(): Promise;
+ getReqBody(): Promise;
+ /**
+ * get the HTTP headers object
+ *
+ * NOTE: header keys will be treated case-insensitive as per
+ the RFC https://tools.ietf.org/html/rfc7540#section-8.1.2
+ * @returns {Promise} headers objectt
+ */
+ getReqHeaders(): Promise;
+ getReqMethod(): Promise;
+}
+
+export abstract class CloudFunctionProxy
+ implements CloudFunctionProxyAdapter
+{
+ request: TReq;
+ context: TContext;
+ constructor(req: TReq, context: TContext) {
+ this.request = req;
+ this.context = context;
+ }
+ abstract log(message: string, level: LogLevel, ...others: unknown[]): void;
+ /**
+ * output log message as debug level.
+ * Only the first parameter 'message' will be shown normally.
+ * A hint message - '* more messages are hidden'.
+ * Add the process environment variable 'DEBUG_MODE' with value 'true' to show them.' - will
+ * be also shown in the output following the 'message' parameter.
+ * The rest parameters 'others' will be hidden.
+ * When process.env.DEBUG_MODE exists and set any value of string type ,
+ * the 'others' parameters will be shown too.
+ * Passing the value DebugMode.DebugOnly to the 'message' will hide all from showing in the log
+ * unless process.env.DEBUG_MODE exists.
+ * @param {string | DebugMode} message the message if passed a string type, will be shown if
+ * process.env.DEBUG_MODE exists and is set any value of string type. Passing DebugMode type
+ * can have special behaviors. (as describe above).
+ * @param {unknown[]} others the extra stuff to output via logAsDebug. These will be hidden
+ * from output if process.env.DEBUG_MODE doesn't exist (as described above).
+ * Otherwise, these will be shown.
+ * @returns {void}
+ */
+ logAsDebug(message: string | DebugMode, ...others: unknown[]): void {
+ const otherCount = (others && others.length) || 0;
+ const hint =
+ otherCount === 0
+ ? ''
+ : `${otherCount} more messages are hidden. Add the process environment` +
+ " variable 'DEBUG_MODE' with value 'true' to show them.";
+ // DEBUG_MODE exists in process.env.
+ if (process.env.DEBUG_MODE !== null && process.env.DEBUG_MODE !== undefined) {
+ // message will be shown in debug mode only
+ if (message === DebugMode.DebugOnly) {
+ return;
+ }
+ // show message, and others.
+ else {
+ this.log(message, LogLevel.Debug, ...others);
+ }
+ }
+ // DEBUG_MODE not exists in process.env.
+ else {
+ // don't sho anything if debug only
+ if (message === DebugMode.DebugOnly) {
+ return;
+ }
+ // otherwise, show message appended with a hint. others will be hidden.
+ else {
+ this.log(message ? `${message}. ${hint}` : hint, LogLevel.Debug);
+ }
+ }
+ }
+ logAsError(message: string, ...others: unknown[]): void {
+ this.log(message, LogLevel.Error, ...others);
+ }
+ logAsInfo(message: string, ...others: unknown[]): void {
+ this.log(message, LogLevel.Info, ...others);
+ }
+ logAsWarning(message: string, ...others: unknown[]): void {
+ this.log(message, LogLevel.Warn, ...others);
+ }
+ logForError(messagePrefix: string, error: Error, ...others: unknown[]): void {
+ const errMessage = error.message || '(no error message available)';
+ const errStack = (error.stack && ` Error stack:${error.stack}`) || '';
+
+ this.log(`${messagePrefix}. Error: ${errMessage}${errStack}`, LogLevel.Error, ...others);
+ }
+ abstract formatResponse(
+ httpStatusCode: number,
+ body: CloudFunctionResponseBody,
+ headers: unknown
+ ): TRes;
+ abstract getRequestAsString(): Promise;
+ abstract getRemainingExecutionTime(): Promise;
+ abstract getReqBody(): Promise;
+ abstract getReqHeaders(): Promise;
+ abstract getReqMethod(): Promise;
+ abstract getReqQueryParameters(): Promise<{ [name: string]: string }>;
+}
diff --git a/core/context-strategy/autoscale-context.ts b/core/context-strategy/autoscale-context.ts
new file mode 100644
index 0000000..dc809e7
--- /dev/null
+++ b/core/context-strategy/autoscale-context.ts
@@ -0,0 +1,1121 @@
+import { AutoscaleEnvironment } from '../autoscale-environment';
+import { AutoscaleSetting } from '../autoscale-setting';
+import { CloudFunctionProxyAdapter, LogLevel } from '../cloud-function-proxy';
+import { waitFor, WaitForConditionChecker, WaitForPromiseEmitter } from '../helper-function';
+import { PlatformAdapter } from '../platform-adapter';
+import {
+ HealthCheckRecord,
+ HealthCheckResult,
+ HealthCheckResultDetail,
+ HealthCheckSyncState as HeartbeatSyncState,
+ PrimaryRecord,
+ PrimaryRecordVoteState
+} from '../primary-election';
+import { VirtualMachine } from '../virtual-machine';
+
/**
 * State of one primary-vm election round: the outgoing primary (if any), the
 * incoming primary, and the candidate under consideration.
 */
export interface PrimaryElection {
    // the primary vm being replaced, if one exists
    oldPrimary?: VirtualMachine;
    oldPrimaryRecord?: PrimaryRecord;
    // the vm elected as the new primary
    newPrimary: VirtualMachine;
    newPrimaryRecord: PrimaryRecord;
    // the vm being evaluated in this round
    candidate: VirtualMachine;
    candidateHealthCheck?: HealthCheckRecord;
    // scaling group preferred to supply the primary vm
    preferredScalingGroup?: string;
    // how long (presumably seconds — confirm against callers) the election may run
    electionDuration?: number;
    signature: string; // to identify a primary election
}
+
// the no-shadow rule errored in the next line may be just a false alarm
// eslint-disable-next-line no-shadow
/** Outcome of running a primary election strategy. */
export enum PrimaryElectionStrategyResult {
    CannotDeterminePrimary = 'CannotDeterminePrimary',
    CompleteAndContinue = 'CompleteAndContinue',
    SkipAndContinue = 'SkipAndContinue'
}
+export interface PrimaryElectionStrategy {
+ prepare(election: PrimaryElection): Promise;
+ result(): Promise;
+ apply(): Promise;
+ readonly applied: boolean;
+}
+
+export interface HeartbeatSyncStrategy {
+ prepare(vm: VirtualMachine): Promise;
+ apply(): Promise;
+ /**
+ * Force the target vm to go into 'out-of-sync' state. Autoscale will stop accepting its
+ * heartbeat sync request.
+ * @returns {Promise} void
+ */
+ forceOutOfSync(): Promise;
+ readonly targetHealthCheckRecord: HealthCheckRecord | null;
+ readonly healthCheckResult: HealthCheckResult;
+ readonly healthCheckResultDetail: HealthCheckResultDetail;
+ readonly targetVmFirstHeartbeat: boolean;
+}
+
/** A tagging action to perform on a single vm. */
export interface VmTagging {
    // id of the vm to tag
    vmId: string;
    // the vm was newly launched
    newVm?: boolean;
    // the vm has just taken the primary role
    newPrimaryRole?: boolean;
    // when truthy, remove tags instead of adding them
    clear?: boolean;
}
+
+export interface TaggingVmStrategy {
+ prepare(taggings: VmTagging[]): Promise;
+ apply(): Promise;
+}
+
+export interface RoutingEgressTrafficStrategy {
+ apply(): Promise;
+}
+
+/**
+ * To provide Autoscale basic logics
+ */
+export interface AutoscaleContext {
+ setPrimaryElectionStrategy(strategy: PrimaryElectionStrategy): void;
+ handlePrimaryElection(): Promise;
+ setHeartbeatSyncStrategy(strategy: HeartbeatSyncStrategy): void;
+ handleHeartbeatSync(): Promise;
+ setTaggingAutoscaleVmStrategy(strategy: TaggingVmStrategy): void;
+ handleTaggingAutoscaleVm(taggings: VmTagging[]): Promise