#!/usr/bin/python
# Version mark: 1548732dc82a409b7ac26c68de83e62a
#
#    Copyright (c) 2024-2025 JetStream Software Inc. <legal@jetstreamsoft.com>
#
#   Provides commands to manage ARO infrastructure in Google Cloud:
#
#       1) deploy AROVA VM (create network infrastructure if it is absent, create disks and VM, install software)
#       2) recover AROVA VM in a different zone/region
#       3) delete existing AROVA VM(s)
#       4) list existing AROVA infrastructure (network, router(s), disk(s), VM(s))
#

import argparse
import os
import os.path
import hashlib
import datetime
import struct
import socket
from typing import Optional

import getpass
import time

from enum import Enum
from subprocess import Popen, TimeoutExpired, PIPE, STDOUT

try:
    import requests
    from google.auth.exceptions import DefaultCredentialsError
    from google.api_core.exceptions import NotFound, Conflict, GoogleAPICallError, BadRequest, ServiceUnavailable
    from google.api_core.extended_operation import ExtendedOperation
    from google.cloud import compute_v1, resourcemanager_v3, iam_admin_v1, kms_v1
    from google.cloud.compute_v1.types.compute import (Allowed, AttachedDisk,
            AttachedDiskInitializeParams, CustomerEncryptionKey, Disk,
            DiskResourceStatusAsyncReplicationStatus, Firewall, Image, Instance,
            ListInstancesRequest, ListSubnetworksRequest, Network, NetworkInterface,
            RegionDisksStartAsyncReplicationRequest, Route, Router, RouterNat,
            ServiceAccount, Subnetwork, Zone)
    from google.cloud.resourcemanager_v3 import Organization, Project
    from google.cloud.iam_admin_v1.types import ServiceAccount as iamServiceAccount, Role
    from google.iam.v1 import iam_policy_pb2
    from google.cloud.kms_v1.types import CryptoKey

    # Alias a very long type name.
    AsyncReplState = DiskResourceStatusAsyncReplicationStatus.State
except Exception as x:
    print(f"""Exception loading modules: {x}
Please install dependencies using command:
    pip install google.cloud google.cloud.compute google.cloud.iam \
        google.cloud.kms google.cloud.resource.manager requests
""")
    exit(1)

class AROVAVM_CONSTS:
    """Constants for downloading, verifying and installing the AROVA bundle.

    Holds the external commands invoked by this script, the download URLs of
    the software bundle and of this CLI itself, and the gcloud/ssh command
    templates executed on the target VM.
    """
    # External command-line utilities this script shells out to.
    USED_COMMANDS=["wget", "gcloud", "sha256sum"]
    # Distribution endpoint for the AROVA bundle and this CLI script.
    BASE_URL = "https://aroms.jetstreamsoft.com/AROVA/"
    BUNDLE_VERSION_NAME = "gcp-bundle.version"
    BUNDLE_NAME = "gcp-bundle.tar.xz"
    CLI_URL = f"{BASE_URL}arova-cli.py"
    # Prefix of the line embedding this script's own MD5 (see selfcheck()).
    CLI_VERSION_MARK = "# Version mark:"
    BUNDLE_NAME_CHECKSUM_FILE = BUNDLE_NAME + ".sha256"
    ALL_BUNDLE_NAMES = [BUNDLE_VERSION_NAME, BUNDLE_NAME, BUNDLE_NAME_CHECKSUM_FILE]
    # wget invocations: quiet with progress, resumable (-c), fixed output name.
    CMD_GET_BUNDLE_VERSION = ["wget", "-q", "--show-progress", "-c",
                              "-O", BUNDLE_VERSION_NAME, f"{BASE_URL}{BUNDLE_VERSION_NAME}"]
    CMD_DOWNLOAD_BUNDLE = ["wget", "-q", "--show-progress", "-c", "--progress=dot:mega",
                           "-O", BUNDLE_NAME, f"{BASE_URL}{BUNDLE_NAME}"]
    CMD_GET_BUNDLE_SHA256 = ["wget", "-q", "--show-progress", "-c",
                             "-O", BUNDLE_NAME_CHECKSUM_FILE, f"{BASE_URL}{BUNDLE_NAME_CHECKSUM_FILE}"]
    CMD_CHECK_BUNDLE = ["sha256sum", "-c", BUNDLE_NAME_CHECKSUM_FILE]
    # Symlink on the VM pointing at the currently-installed bundle version.
    DIR_TO_EXTRACT = "~/current"
    # gcloud transport commands; BatchMode forbids interactive ssh prompts.
    CMD_SCP = ["gcloud", "compute", "scp", "--tunnel-through-iap", "--scp-flag=-oBatchMode=yes" ]
    CMD_SSH = ["gcloud", "compute", "ssh", "--ssh-flag=-oBatchMode=yes" ]
    CMD_IAP_ACCESS = "gcloud compute start-iap-tunnel {instance} 443 --local-host-port=localhost:8443 --zone={zone} --project {project}"
    CMD_CREATE_TMP_DIR = "--command=mkdir -p ~/{version} && ln -s {version} " + DIR_TO_EXTRACT
    CMD_EXTRACT_BUNDLE = [ f"--command=tar -xJvf /tmp/{BUNDLE_NAME} -C {DIR_TO_EXTRACT}" ]
    # NB! Command is processed to string before invocation, so ssh command must be quoted.
    CMD_CHECK_VM = [ f"--command=hostnamectl hostname" ]
    CMD_UPDATE_SSH_KEYS = [ "gcloud", "compute", "config-ssh" ]
    CMD_RUN_AROVA_TO_DEPLOY = [ f"--command=cd {DIR_TO_EXTRACT}/docker && sudo {DIR_TO_EXTRACT}/docker/setup-deploy-arova.sh" ]
    CMD_RUN_AROVA_TO_RECOVERY = [ f"--command=cd {DIR_TO_EXTRACT}/docker && sudo {DIR_TO_EXTRACT}/docker/setup-recovery-arova.sh" ]
    # Setup command with format placeholders.
    CMD_SETUP_AROVA_TO_DEPLOY = "--command=cd " + DIR_TO_EXTRACT + "/docker && sudo " + DIR_TO_EXTRACT + "/docker/config-deploy-arova.sh {src_reg} {disk_id} {disk_name} {dest_region} {dest_zone_pri} {dest_zone_sec} {subscription_id} {kms_key_name}"
    CMD_SETUP_AROVA_TO_RECOVERY = "--command=cd " + DIR_TO_EXTRACT + "/docker && sudo " + DIR_TO_EXTRACT + "/docker/config-recovery-arova.sh {operation_type} {src_reg} {disk_id} {disk_name} {dest_zone_pri} {dest_zone_sec} {kms_key_name}"
    # Check subscription
    #URL_CHECK_SUBSCRIPTION = "https://aroms.jetstreamsoft.com/api/1.0/status"
    URL_CHECK_SUBSCRIPTION = "https://api-aroms.jetstreamsoft.com/web/v1/status/api"
    # Well-known sentinel id used when probing the subscription endpoint.
    AROVA_ID_TO_CHECK = "00000000-0000-0000-0000-000000000001"
    # Log of external process invocations (see check_required_utils()).
    EXEC_LOG=os.path.join(os.getcwd(), "execution.log")
    # Progress dots printed per line before wrapping (see limit_dots()).
    DOTS_IN_LINE = 80

class GCLOUD_CONSTS:
    """Google-Cloud-side naming conventions, URL templates and IAM permissions.

    Resource name prefixes/labels identify AROVA-owned objects so they can be
    listed and deleted later; the URL templates build fully-qualified GCE
    resource links; PERMISSIONS_LIST is the custom-role permission set the
    AROVA service account needs.
    """
    # Name suffix of the AROVA ingress firewall rule ("<network>-allow-access").
    AROVA_FIREWALL_SSH_RULE = "allow-access"
    NAT_ROUTER_PREFIX = "nat-router"
    CLOUDNAT_ROUTER_PREFIX = "cloud-nat"
    DEFAULT_ROUTE_SUFFIX = "default-route"
    DISK_SIZE_BOOT_GB = 20 #size is in Gb
    DISK_SIZE_ADC_GB = 30 #size is in Gb
    DISK_NAME_BOOT_PREFIX = "jet-aro-boot"
    DISK_NAME_ADC_PREFIX = "jet-aro-data"
    VM_NAME_PREFIX = "jet-aro-vm"

    # Labels stamped onto AROVA disks so list/recovery commands can find them.
    DISK_LABEL_SRC_REGION = "src-region"
    DISK_LABEL_DEST_REGION = "dest-region"
    DISK_LABEL_IS_PRIMARY = "is-primary"
    DISK_LABEL_SRC_ZONE1 = "src-zone1"
    DISK_LABEL_SRC_ZONE2 = "src-zone2"
    DISK_LABEL_DEST_ZONE1 = "dest-zone1"
    DISK_LABEL_DEST_ZONE2 = "dest-zone2"
    DISK_LABEL_AROVA_DISK = "is-arova-conf-disk"

    # Labels stamped onto the AROVA VM instance.
    AROVA_VM_LABEL = "is-arova-vm"
    AROVA_VM_LABEL_SRC_REGION = "src-region"
    AROVA_VM_LABEL_DEST_REGION = "dest-region"

    # Filter expression template for locating AROVA VMs by their labels.
    AROVA_VM_SEARCH_CRITERIA = "labels.{vm_label}:true AND labels.{src_region_label}:{src_region} AND labels.{dest_region_label}:{dest_region}"

    # Templates for fully-qualified GCE resource URLs / KMS resource names.
    GNETWORK_TEMPLATE = "https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}"
    PROJECT_DEFAULT_GATEWAY = "https://www.googleapis.com/compute/v1/projects/{}/global/gateways/default-internet-gateway"
    REPLICA_ZONES_TEMPLATE = "https://www.googleapis.com/compute/v1/projects/{}/zones/{}"
    DISK_TYPE_REGIONAL_PD_BALANCED_TEMPLATE = "https://www.googleapis.com/compute/v1/projects/{}/regions/{}/diskTypes/pd-balanced"
    DISK_TYPE_ZONE_PD_PALANCED_TEMPLATE = "https://www.googleapis.com/compute/v1/projects/{}/zones/{}/diskTypes/pd-balanced"
    MACHINE_TYPE_TEMPLATE = "https://www.googleapis.com/compute/v1/projects/{}/zones/{}/machineTypes/{}"
    KMS_KEY_NAME_TEMPLATE = "projects/{project}/locations/{location}/keyRings/{key_ring_name}/cryptoKeys/{key_name}"
    KMS_LOCATION_NAME_TEMPLATE = "projects/{project}/locations/{location}"
    REGION_URL_TEMPLATE = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}"

    NETWORK_NAME = "arova"
    # See https://cloud.google.com/iap/docs/using-tcp-forwarding#create-firewall-rule
    NETWORK_IAP_RANGE = "35.235.240.0/20"

    # Project-level permissions required by the AROVA service account's role.
    PERMISSIONS_LIST = [
        "cloudkms.keyRings.list",
        "cloudkms.cryptoKeys.list",
        "cloudkms.cryptoKeyVersions.useToDecrypt",
        "cloudkms.cryptoKeyVersions.useToEncrypt",
        "compute.addresses.list",
        "compute.addresses.use",
        "compute.addresses.useInternal",
        "compute.disks.addResourcePolicies",
        "compute.disks.create",
        "compute.disks.delete",
        "compute.disks.get",
        "compute.disks.list",
        "compute.disks.removeResourcePolicies",
        "compute.disks.setLabels",
        "compute.disks.startAsyncReplication",
        "compute.disks.stopAsyncReplication",
        "compute.disks.stopGroupAsyncReplication",
        "compute.disks.update",
        "compute.disks.use",
        "compute.disks.useReadOnly",
        "compute.images.get",
        "compute.images.useReadOnly",
        "compute.instances.addAccessConfig",
        "compute.instances.addMaintenancePolicies",
        "compute.instances.addResourcePolicies",
        "compute.instances.attachDisk",
        "compute.instances.create",
        "compute.instances.delete",
        "compute.instances.detachDisk",
        "compute.instances.get",
        "compute.instances.getGuestAttributes",
        "compute.instances.getIamPolicy",
        "compute.instances.list",
        "compute.instances.setDeletionProtection",
        "compute.instances.setDiskAutoDelete",
        "compute.instances.setIamPolicy",
        "compute.instances.setLabels",
        "compute.instances.setMachineResources",
        "compute.instances.setMachineType",
        "compute.instances.setMetadata",
        "compute.instances.setMinCpuPlatform",
        "compute.instances.setName",
        "compute.instances.setScheduling",
        "compute.instances.setSecurityPolicy",
        "compute.instances.setServiceAccount",
        "compute.instances.setShieldedInstanceIntegrityPolicy",
        "compute.instances.setShieldedVmIntegrityPolicy",
        "compute.instances.setTags",
        "compute.instances.simulateMaintenanceEvent",
        "compute.instances.start",
        "compute.instances.stop",
        "compute.instances.update",
        "compute.instances.updateAccessConfig",
        "compute.instances.updateDisplayDevice",
        "compute.instances.updateNetworkInterface",
        "compute.instances.updateSecurity",
        "compute.instances.updateShieldedInstanceConfig",
        "compute.instances.updateShieldedVmConfig",
        "compute.instances.use",
        "compute.instanceTemplates.useReadOnly",
        "compute.machineImages.useReadOnly",
        "compute.machineTypes.get",
        "compute.machineTypes.list",
        "compute.networks.get",
        "compute.networks.list",
        "compute.networks.use",
        "compute.networks.useExternalIp",
        "compute.projects.get",
        "compute.regionOperations.get",
        "compute.regions.get",
        "compute.reservations.list",
        "compute.resourcePolicies.create",
        "compute.resourcePolicies.delete",
        "compute.resourcePolicies.get",
        "compute.resourcePolicies.list",
        "compute.resourcePolicies.use",
        "compute.resourcePolicies.useReadOnly",
        "compute.serviceAttachments.getIamPolicy",
        "compute.snapshots.useReadOnly",
        "compute.subnetworks.get",
        "compute.subnetworks.list",
        "compute.subnetworks.use",
        "compute.subnetworks.useExternalIp",
        "compute.zoneOperations.get",
        "compute.zones.list",
        "iam.serviceAccounts.actAs",
        "iam.serviceAccounts.get",
        "iam.serviceAccounts.list",
        "iam.serviceAccounts.setIamPolicy",
        "logging.logEntries.create",
        "logging.logMetrics.create",
        "monitoring.timeSeries.list",
        "osconfig.guestPolicies.create",
        "osconfig.guestPolicies.delete",
        "osconfig.guestPolicies.get",
        "osconfig.guestPolicies.list",
        "osconfig.guestPolicies.update",
        "resourcemanager.projects.get",
        "resourcemanager.projects.getIamPolicy",

    ]
    # Organization-level permissions (project discovery only).
    ORG_PERMISSIONS_LIST = [
        "resourcemanager.projects.list"
    ]

# Active GCP project id; populated once during startup and read by most helpers.
project_id = ""
# Stack of [description, callable] pairs; executed in reverse to undo a
# partially-completed deployment when a later step fails.
rollback_fns = []
# When True, rollback actions are skipped so failed resources can be inspected.
keep_after_fail = False

class FirewallState(Enum):
    """Observed state of the AROVA access firewall rule in a network."""
    ABSENT = 1    # no usable rule exists
    DISABLED = 2  # a matching rule exists but is disabled
    ENABLED = 3   # a matching, enabled rule exists

class ArovaCliExceptionBase(Exception):
    """Root of the arova-cli exception hierarchy."""

class ArovaCleanupError(ArovaCliExceptionBase):
    """Raised when a cleanup/rollback step fails."""

class IllegalArgumentError(ArovaCliExceptionBase):
    """Raised when an argument or resource value fails validation."""

class WrapInvokerException(ArovaCliExceptionBase):
    """Wraps an exception raised by an underlying (Google API) call.

    The original exception stays available as ``invoker_exception``.
    """
    def __init__(self, invoker_exception: Exception):
        self.invoker_exception = invoker_exception
        super().__init__(str(invoker_exception))

class BadZoneException(ArovaCliExceptionBase):
    """Raised for an unknown or unusable zone; keeps the zone name."""
    def __init__(self, message, zone):
        self.zone = zone
        super().__init__(message)

class NoCommonRegionException(ArovaCliExceptionBase):
    """Raised when two zones do not share a region; keeps both zone names."""
    def __init__(self, message, pri_zone, sec_zone):
        self.primary_zone = pri_zone
        self.secondary_zone = sec_zone
        super().__init__(message)

class ExternalProcessException(ArovaCliExceptionBase):
    """Raised when an external command exits with a non-zero status.

    Attributes:
        command: the failed command rendered as a single display string.
        return_code: the process exit status.
        output: captured combined stdout/stderr (may be empty).
    """
    def __init__(self, execCommand: str | list[str], return_code: int, stdout: str):
        cmd = " ".join(execCommand) if isinstance(execCommand, list) else execCommand
        super().__init__(f"Process '{cmd}' failed with return code {return_code}")
        # Bug fix: this previously stored return_code, losing the command text.
        self.command = cmd
        self.return_code = return_code
        self.output = stdout

    def __str__(self):
        # Bug fix: was 'super.__str__(self)', which invoked object.__str__
        # through the 'super' *type* and produced the repr instead of the
        # exception message.
        ret = super().__str__()
        if self.output:
            ret += "\n--------- Process output -------\n" + self.output
        return ret

class InvalidNetworkException(ArovaCliExceptionBase):
    """Raised for malformed, misaligned or overlapping network CIDR ranges."""

class BadPasswordException(ArovaCliExceptionBase):
    """Raised when a supplied password is rejected."""

class DownloadFailedException(ArovaCliExceptionBase):
    """Raised when downloading the AROVA bundle or its checksum fails."""

class NewVersionAvailableException(ArovaCliExceptionBase):
    """Raised when the user declines to continue with an outdated CLI."""

class GcloudAuthException(ArovaCliExceptionBase):
    """Signals missing or invalid gcloud application-default credentials."""
    def __init__(self, invoker_exception: Exception):
        self.invoker_exception = invoker_exception
        hint = "Please run 'gcloud auth application-default login' to provide app-default credentials.\n"
        super().__init__(hint + str(invoker_exception))

# MD5 hex digest of this script's non-comment lines; filled in by selfcheck().
own_hash = ""

def selfcheck():
    """Verify this script against its embedded version mark.

    Hashes every non-comment line of the script with MD5, compares the digest
    to the value on the '# Version mark:' line, warns on mismatch, and stores
    the computed digest in the module-level ``own_hash``.
    """
    digest = hashlib.md5()
    expected_hash = ""
    with open(__file__, "r") as f:
        for line in f:
            if line.startswith(AROVAVM_CONSTS.CLI_VERSION_MARK):
                # The mark line itself carries the expected digest.
                expected_hash = line.split(':')[1].strip()
            elif not line.startswith("#"):
                # Comment lines are excluded from the digest.
                digest.update(line.encode())
    real_hash = digest.hexdigest()
    if real_hash != expected_hash:
        print("Warning: the script was modified from the original version.")
    global own_hash
    own_hash = real_hash

def remotecheck():
    """Compare the published CLI's version mark against our own hash.

    Fetches the start of the remote script, extracts its version mark, and
    when it differs from ``own_hash`` offers the user a chance to abort.

    Raises:
        NewVersionAvailableException: when the user declines to continue.
    """
    remote_hash = ""
    response = requests.get(AROVAVM_CONSTS.CLI_URL, headers={"Range": "bytes=0-256"})
    for line in response.text.splitlines():
        if line.startswith(AROVAVM_CONSTS.CLI_VERSION_MARK):
            remote_hash = line.split(':')[1].strip()
            break
    # Only compare when both hashes are known.
    if own_hash and remote_hash and remote_hash != own_hash:
        print(f"New version of CLI is available at {AROVAVM_CONSTS.CLI_URL}")
        reply = input("Enter 'yes' if you want to continue with current version:")
        if reply != 'yes':
            raise NewVersionAvailableException()

def check_required_utils():
    """Ensure required external utilities exist; rotate an old execution log.

    Exits the process with status 1 when any utility from
    AROVAVM_CONSTS.USED_COMMANDS (or the 'command' builtin itself) is missing.
    """
    missing = []
    for utility in AROVAVM_CONSTS.USED_COMMANDS:
        try:
            # 'command' is a builtin in google cloud shell
            runCommand(f'command -v {utility}', shell=True)
        except ExternalProcessException:
            missing.append(utility)
        except FileNotFoundError:
            # No shell/'command' at all: nothing further can be probed.
            missing.append('command')
            break
    if missing:
        print(f"""Please install missing utilities: {",".join(missing)}""")
        exit(1)
    # Keep the previous run's log by renaming it with a timestamp suffix.
    if os.path.exists(AROVAVM_CONSTS.EXEC_LOG):
        new_name = AROVAVM_CONSTS.EXEC_LOG + datetime.datetime.now().isoformat(timespec='seconds')
        print(f"Warning: renaming old execution log to {new_name}")
        os.rename(AROVAVM_CONSTS.EXEC_LOG, new_name)

def limit_dots(current: int) -> str:
    """Return '\\n' when *current* completes a row of progress dots, else ''."""
    if current % AROVAVM_CONSTS.DOTS_IN_LINE == AROVAVM_CONSTS.DOTS_IN_LINE - 1:
        return "\n"
    return ""

def wait(operation: ExtendedOperation):
    """Block until *operation* completes, printing progress dots.

    Raises:
        WrapInvokerException: wrapping any GoogleAPICallError.
        Exception: any other failure is re-raised unchanged.
    """
    try:
        print("Waiting", end="", flush=True)
        dot_count = 0
        while operation.running():
            dot_count += 1
            print(".", end=limit_dots(dot_count), flush=True)
            time.sleep(10)
        operation.result()
        if operation.error_code:
            raise operation.exception()
        print("", flush=True)
    except GoogleAPICallError as api_error:
        # Terminate the dot line before reporting.
        print("", flush=True)
        raise WrapInvokerException(api_error)
    except Exception:
        print("", flush=True)
        raise

def _parseGoogleUrl(resource_url: str, *fields: str | list[str]) -> list[str]:
    # Parses the URL for the specific fields w/o respect for anything extra
    # or a different fields order. We just want the parts to uniquely exist.
    if any(x.isspace() for x in resource_url):
        raise IllegalArgumentError(f"URL cannot contain whitespaces - '{resource_url}'")
    prefix = "https://www.googleapis.com/compute/v1/"
    if not resource_url.startswith(prefix):
        raise IllegalArgumentError(f"Not a resource URL {resource_url}")
    result = []
    named_parts = {}
    parts = resource_url[len(prefix):].split("/")
    if len(parts)%2 != 0:
        raise IllegalArgumentError(f"Unexpected resource URL format {resource_url}")
    for name, part in zip(parts[::2], parts[1::2]):
        if name in named_parts:
            raise IllegalArgumentError(f"Unexpected double descriptor in resource URL {resource_url}")
        named_parts[name] = part
    for name in fields:
        if isinstance(name, list):
            for iname in name:
                if iname in named_parts:
                    result.append(named_parts[iname])
                    break
            else:
                raise IllegalArgumentError(f"Resource URL {resource_url} does not contain either of: {','.join(name)}")
        else:
            if not name in named_parts:
                raise IllegalArgumentError(f"Resource URL {resource_url} does not contain '{name}'")
            result.append(named_parts[name])
    return result

def parseSubnetUrl(subnet_url: str) -> list[str]:
    """Return [region, subnetwork-name] parsed from a subnetwork URL.

    Example:
        .../compute/v1/projects/blc-01/regions/africa-south1/subnetworks/default
    """
    return _parseGoogleUrl(subnet_url, "regions", "subnetworks")

def parseDiskUrlToParameters(disk_url: str) -> list[str]:
    """Return [project, region-or-zone, disk-name] parsed from a disk URL.

    Disks may be regional or zonal, hence the descriptor alternative.
    Example:
        .../projects/project/regions/us-east1/disks/jet-aro-data-us-east1-us-west1
    """
    return _parseGoogleUrl(disk_url, "projects", ["regions", "zones"], "disks")

def parseZoneUrlToParameters(vm_zone: str) -> list[str]:
    """Return [project, zone] parsed from a VM zone URL.

    Example:
        .../compute/v1/projects/projectname/zones/us-east1-b
    """
    return _parseGoogleUrl(vm_zone, "projects", "zones")

def getZoneNames(project: str) -> list[str]:
    """List names of all compute zones in *project*.

    Best effort: API errors are printed and whatever was collected so far
    is returned (possibly an empty list).
    """
    zone_names: list[str] = []
    try:
        client = compute_v1.ZonesClient()
        request = compute_v1.ListZonesRequest(project=project)
        for zone_info in client.list(request=request):
            zone_names.append(zone_info.name)
    except Exception as e:
        print(e)
    return zone_names

def getNetwork(name: str) -> Optional[Network]:
    """Fetch network *name* from the current project.

    Returns:
        The Network, or None when no such network exists.  (The previous
        annotation claimed a plain Network despite the None path.)
    Raises:
        WrapInvokerException: on any API failure other than NotFound.
    """
    try:
        client = compute_v1.NetworksClient()
        return client.get(project=project_id, network=name)
    except NotFound:
        return None
    except Exception as e:
        raise WrapInvokerException(e)

def createNetwork(name: str) -> Network:
    """Create the AROVA VPC network *name* and register its rollback.

    The network uses regional routing and no auto-created subnetworks.

    Raises:
        WrapInvokerException: on any API failure.
    """
    client = compute_v1.NetworksClient()
    network = Network()
    network.name = name
    network.auto_create_subnetworks = False
    network.description = "AROVA network"
    network.routing_config.routing_mode = "REGIONAL"

    try:
        wait(client.insert(project=project_id, network_resource=network))
    except Exception as e:
        raise WrapInvokerException(e)

    # Register an undo action so a failed deployment can clean up after itself.
    global rollback_fns
    def _remove(client=client, name=name):
        wait(client.delete(project=project_id, network=name))
    rollback_fns.append([f"Deleting network '{name}'", _remove])

    return getNetwork(name)


class CidrProcessor:
    """Tracks allocated IPv4 CIDR ranges and hands out free subnets.

    Used ranges are kept as a sorted list of [start_ip_as_int, span] pairs;
    adjacent pairs are merged so the free-range search in getRange() stays
    simple. All error paths raise InvalidNetworkException.
    """

    def __init__(self, global_ip_range: str = "10.0.0.0/8", subnet_span: int = 25):
        # Sorted, merged list of [start, span] pairs already in use.
        self.used_ranges = []
        # Netmask length of subnets handed out by getRange().
        self.subnet_span = subnet_span
        self.global_start, self.global_span = self._parse_ip_range(global_ip_range)
        self.global_range = global_ip_range

    def _normalize(self):
        """Merge adjacent [start, span] entries in place."""
        idx = 0
        while idx < len(self.used_ranges) - 1:
            current = self.used_ranges[idx]
            following = self.used_ranges[idx + 1]
            if current[0] + current[1] == following[0]:
                current[1] += following[1]
                self.used_ranges.remove(following)
            else:
                idx += 1

    def _insert(self, subnet: str, ip_as_int: int, span: int):
        """Insert a range keeping the list sorted; reject intersections.

        Raises:
            InvalidNetworkException: when the new range overlaps an existing one.
        """
        insert_idx = 0
        for idx in range(len(self.used_ranges)):
            xrange = self.used_ranges[idx]
            if xrange[0] < ip_as_int:
                insert_idx = idx + 1
                if xrange[0] + xrange[1] > ip_as_int:
                    raise InvalidNetworkException(f"Intersecting IP range detected in subnet {subnet}")
            elif xrange[0] > ip_as_int:
                if ip_as_int + span > xrange[0]:
                    raise InvalidNetworkException(f"Intersecting IP range detected in subnet {subnet}")
            else:
                raise InvalidNetworkException(f"Intersecting IP range detected in subnet {subnet}")
        self.used_ranges.insert(insert_idx, [ip_as_int, span])
        self._normalize()

    @staticmethod
    def _parse_ip_range(ip_range: str):
        """Parse 'a.b.c.d/n' into (ip_as_int, span); validate alignment.

        Raises:
            InvalidNetworkException: on malformed, unaligned or out-of-range input.
        """
        try:
            parts = ip_range.split('/')
            if len(parts) != 2:
                raise ValueError(ip_range)
            nmask = int(parts[1])
        except ValueError:
            raise InvalidNetworkException(f"Invalid format of IP range {ip_range}")
        if not (0 <= nmask <= 32):
            raise InvalidNetworkException(f"Invalid netmask in IP range {ip_range}")
        span = 2**(32-nmask)
        try:
            ip_as_int = struct.unpack('!I', socket.inet_pton(socket.AF_INET, parts[0]))[0]
        except (ValueError, OSError):
            # Bug fix: inet_pton signals a malformed address with OSError,
            # which the previous 'except ValueError' never caught.
            raise InvalidNetworkException(f"Invalid IP address in IP range {ip_range}")
        if ip_as_int % span != 0:
            raise InvalidNetworkException(f"IP address not aligned in IP range {ip_range}")
        return ip_as_int, span

    def addRange(self, subnet: str, ip_range: str):
        """Mark *ip_range* (CIDR string) as used; *subnet* names it in errors."""
        ip_as_int, span = self._parse_ip_range(ip_range)
        self._insert(subnet, ip_as_int, span)

    def getRange(self) -> str:
        """Allocate and return the next free /{subnet_span} CIDR string.

        Raises:
            InvalidNetworkException: when the global range is exhausted.
        """
        startip = self.global_start
        lastip = self.global_start + self.global_span
        span = 2**(32 - self.subnet_span)
        for iprange in self.used_ranges:
            # A large-enough gap before this used range: take it.
            if startip + span <= iprange[0]:
                break
            # Otherwise skip past the used range, realigning to our span.
            startip = iprange[0] + iprange[1]
            if startip % span != 0:
                startip += span - (startip % span)
        if startip >= lastip:
            raise InvalidNetworkException(f"No free /{self.subnet_span} range in {self.global_range}")

        self._insert("new subnet", startip, span)

        # Render the integer address as dotted-quad, least octet first.
        cidr_str = ""
        for idx in range(4):
            cidr_str = str(startip % 256) + '.' + cidr_str
            startip //= 256
        return cidr_str[:-1] + "/" + str(self.subnet_span)


# Warning: modifies and returns the regions parameter.
def checkAndCreateSubnets(network: Network, regions: list[str], create: bool) -> list[str]:
    """Ensure AROVA subnets exist in *regions* for *network*.

    Regions already covered by an existing subnetwork are removed from
    *regions* in place. When create is False the still-missing regions are
    returned; otherwise missing subnets are created (with rollback actions
    registered) and [] is returned.

    Raises:
        WrapInvokerException: on any API failure.
    """
    subclient = compute_v1.SubnetworksClient()
    cidr = CidrProcessor()
    print(f"Checking subnetworks in network '{network.name}'")
    for subnet_url in network.subnetworks:
        region, subname = parseSubnetUrl(subnet_url)
        if region in regions:
            regions.remove(region)
        try:
            sndata = subclient.get(project=project_id, region=region, subnetwork=subname)
            # Record the range so newly allocated CIDRs cannot collide.
            if sndata.ip_cidr_range:
                cidr.addRange(f'{region}/{subname}', sndata.ip_cidr_range)
        except Exception as e:
            raise WrapInvokerException(e)

    if not regions:
        return []
    if not create:
        return regions

    global rollback_fns
    for region in regions:
        print(f"Creating subnet of network '{network.name}' in region {region}")

        subnet = Subnetwork()
        subnet.description = f"AROVA subnet for region {region}"
        subnet.name = f"{network.name}-{region}"
        subnet.network = network.self_link
        subnet.region = GCLOUD_CONSTS.REGION_URL_TEMPLATE.format(project=project_id, region=region)
        subnet.stack_type = "IPV4_ONLY"
        subnet.private_ip_google_access = False
        subnet.ip_cidr_range = cidr.getRange()
        try:
            wait(subclient.insert(project=project_id, region=region, subnetwork_resource=subnet))
        except Exception as e:
            raise WrapInvokerException(e)

        # Register an undo action for this subnet.
        def _remove(client=subclient, name=subnet.name, region=region):
            wait(client.delete(project=project_id, subnetwork=name, region=region))
        rollback_fns.append([f"Deleting subnetwork '{subnet.name}'", _remove])
    return []


def correctDefaultAccessRule(network: str, sa: str):
    """Amend the existing '<network>-allow-access' firewall rule in place.

    Ensures the rule allows ingress from the IAP range (unless already open),
    is enabled, includes the service account *sa* when the rule restricts by
    SA, and permits TCP 22 and 443. Only the first rule matching the
    conventional name is patched (there is expected to be at most one).

    Raises:
        WrapInvokerException: wrapping any failure, including the
            IllegalArgumentError raised when the named rule belongs to a
            different network.
    """
    try:
        client = compute_v1.FirewallsClient()
        expected_name = f"{network}-{GCLOUD_CONSTS.AROVA_FIREWALL_SSH_RULE}"
        list_request = compute_v1.types.ListFirewallsRequest(
            project=project_id,
            filter=f'name={expected_name}'
        )
        for rule in client.list(list_request):
            # Sanity check: the rule carrying the expected name must belong
            # to the AROVA network.
            if not rule.network.endswith(f"/{network}"):
                msg = f"Unexpected network {rule.network} in firewall rule {expected_name}"
                raise IllegalArgumentError(msg)
            # Add the IAP range unless ingress is already open to it (or to
            # the whole internet).
            if GCLOUD_CONSTS.NETWORK_IAP_RANGE not in rule.source_ranges and \
                    "0.0.0.0/0" not in rule.source_ranges:
                rule.source_ranges += [ GCLOUD_CONSTS.NETWORK_IAP_RANGE ]
            rule.disabled = False
            # Extend the SA list only when the rule restricts by SA at all.
            if rule.source_service_accounts and sa not in rule.source_service_accounts:
                rule.source_service_accounts += [ sa ]
            has_ssh = False
            has_https = False
            for allowed in rule.allowed:
                if allowed.I_p_protocol != "tcp":
                    continue
                if "22" in allowed.ports:
                    has_ssh = True
                if "443" in allowed.ports:
                    has_https = True
            # Append a tcp 22/443 entry when either port is missing.
            if not has_ssh or not has_https:
                allowed = Allowed()
                allowed.I_p_protocol = "tcp"
                allowed.ports = ["22", "443"]
                rule.allowed += [ allowed ]
            wait(client.patch(project=project_id, firewall=rule.name, firewall_resource=rule))
            # Process only the first rule in the list - but there can be only one.
            break
    except Exception as e:
        raise WrapInvokerException(e)


def findAccessFirewallRule(network: str, sa: str) -> Firewall:
    """Find a firewall rule usable for SSH(22)+HTTPS(443) access to AROVA.

    A rule qualifies when it belongs to *network*, permits ingress from the
    IAP range (or from anywhere), allows TCP ports 22 and 443, and either
    lists *sa* among its source service accounts or restricts by none. The
    first enabled match is returned immediately; otherwise the last disabled
    match (or None when nothing qualifies).
    """
    client = compute_v1.FirewallsClient()
    list_request = compute_v1.types.ListFirewallsRequest(
        project = project_id,
        filter = f'network="https://www.googleapis.com/compute/v1/projects/{project_id}/global/networks/{network}"'
    )
    last_disabled_rule = None
    for rule in client.list(list_request):
        if rule.allowed is None or not rule.network.endswith(f"networks/{network}"):
            continue
        if rule.source_service_accounts and sa not in rule.source_service_accounts:
            continue
        reachable = (GCLOUD_CONSTS.NETWORK_IAP_RANGE in rule.source_ranges
                     or "0.0.0.0/0" in rule.source_ranges)
        if not reachable:
            continue
        # Collect every TCP port the rule opens; both 22 and 443 must be in.
        tcp_ports = {port
                     for allowed in rule.allowed if allowed.I_p_protocol == "tcp"
                     for port in allowed.ports}
        if "22" not in tcp_ports or "443" not in tcp_ports:
            continue
        if rule.disabled:
            last_disabled_rule = rule
        else:
            print(f"Rule '{rule.name}' can be used for access to AROVA")
            return rule

    return last_disabled_rule

def checkAccessFirewallRule(network: str, sa: str) -> FirewallState:
    """Classify the AROVA access rule for *network* as ABSENT/DISABLED/ENABLED.

    Best effort: lookup failures are printed and reported as ABSENT.
    """
    try:
        rule = findAccessFirewallRule(network, sa)
        if rule:
            return FirewallState.DISABLED if rule.disabled else FirewallState.ENABLED
    except Exception as e:
        print(e)
    return FirewallState.ABSENT

def enableFirewallAccessRule(network: str, sa: str):
    """Re-enable the (disabled) AROVA access firewall rule for *network*.

    Raises:
        IllegalArgumentError: when no suitable rule exists at all. (Bug fix:
            previously a None result fell through to ``rule.name`` and
            surfaced as an opaque AttributeError inside WrapInvokerException.)
        WrapInvokerException: on API failures.
    """
    try:
        rule = findAccessFirewallRule(network, sa)
        if rule is None:
            raise IllegalArgumentError(
                f"No access firewall rule found for network '{network}'")
        client = compute_v1.FirewallsClient()
        updatedRule = Firewall()
        updatedRule.disabled = False
        wait(client.patch(project=project_id, firewall=rule.name, firewall_resource=updatedRule))
    except ArovaCliExceptionBase:
        # Our own exceptions pass through unwrapped.
        raise
    except Exception as e:
        raise WrapInvokerException(e)

def createAccessFirewallRule(network: str, sa: str):
    """Create the AROVA ingress rule (TCP 22/443 from the IAP range).

    On a name conflict the existing rule is amended via
    correctDefaultAccessRule() instead. A rollback action deleting the rule
    is registered on successful creation.

    Raises:
        WrapInvokerException: on API failures other than a name conflict.
    """
    rule_name = f"{network}-{GCLOUD_CONSTS.AROVA_FIREWALL_SSH_RULE}"
    try:
        client = compute_v1.FirewallsClient()
        allowed = Allowed()
        allowed.I_p_protocol = "tcp"
        allowed.ports = ["22", "443"]
        rule = Firewall()
        rule.name = rule_name
        rule.network = GCLOUD_CONSTS.GNETWORK_TEMPLATE.format(project_id, network)
        rule.allowed = [allowed]
        rule.disabled = False
        # Allow ingress from IAP
        rule.source_ranges = [GCLOUD_CONSTS.NETWORK_IAP_RANGE]
        rule.source_service_accounts = [sa]

        #TODO: call return google.api_core.extended_operation.ExtendedOperation. Add check status
        wait(client.insert(project=project_id, firewall_resource=rule))

        global rollback_fns
        def _remove(client=client, name=rule.name):
            wait(client.delete(project=project_id, firewall=name))
        rollback_fns.append([f"Deleting firewall rule '{rule_name}'", _remove])
    except Conflict:
        print(f'Rule with name {rule_name} already exists, trying to amend')
        correctDefaultAccessRule(network, sa)
    except Exception as e:
        raise WrapInvokerException(e)

def getNATRouterName(network: str, region: str) -> str:
    """Return the name of a router attached to *network* in *region*.

    Best effort: API errors are printed and '' is returned when no router
    is found.
    """
    try:
        client = compute_v1.RoutersClient()
        for router in client.list(project=project_id, region=region):
            if router.network is None:
                continue
            # Compare only the trailing network name of the router's URL.
            router_net = router.network.rsplit("/", 1)[-1]
            if router_net == network:
                return router.name
    except Exception as e:
        print(e)

    # Empty string compares to False.
    return ""

def createNATRouter(network, region):
    """Create a router for Cloud NAT on *network* in *region*.

    Registers a rollback action and returns the new router's name.

    Raises:
        WrapInvokerException: on any API failure.
    """
    try:
        client = compute_v1.RoutersClient()
        router = Router()
        router.name = f"{network}-{GCLOUD_CONSTS.NAT_ROUTER_PREFIX}-{region}"
        router.network = GCLOUD_CONSTS.GNETWORK_TEMPLATE.format(project_id, network)

        wait(client.insert(project=project_id, region=region, router_resource=router))

        global rollback_fns
        def _remove(client=client, name=router.name, region=region):
            wait(client.delete(project=project_id, region=region, router=name))
        rollback_fns.append([f"Deleting router '{router.name}'", _remove])

        return router.name
    except Exception as e:
        raise WrapInvokerException(e)

def findNATRouter(region, router):
    """Return the Router named *router* in *region*, or None when absent."""
    client = compute_v1.RoutersClient()
    matches = (r for r in client.list(project=project_id, region=region)
               if r.name and r.name == router)
    return next(matches, None)

def checkCloudNAT(region: str, router: str) -> bool:
    """Return True when 'router' in 'region' carries at least one Cloud NAT.

    Cloud NAT is required for outgoing communication with AroMS for billing.
    Returns False when the router is absent, has no NATs, or lookup fails.
    """
    try:
        r = findNATRouter(region, router)
        # BUG FIX: the original expression `r and r.nats and len(r.nats) != 0`
        # could return None (router missing) or an empty container instead of
        # a bool, violating the declared return type.  Truthiness of r.nats
        # already covers the length test.
        return bool(r and r.nats)
    except Exception as e:
        print(e)

    return False

def createCloudNAT(network, region, router):
    """Attach a Cloud NAT configuration to the existing router 'router'.

    Cloud NAT provides outbound internet access (needed for AroMS billing
    traffic).  The router is patched in place; on success a rollback entry
    is registered.  Failures are wrapped in WrapInvokerException.
    """
    try:
        # Bail out quietly if the router does not exist.
        r = findNATRouter(region, router)
        if not r:
            print(f"Router {router} not found.")
            return

        client = compute_v1.RoutersClient()
        updatedRouter = Router()
        cloudNAT = RouterNat()
        cloudNAT.name = f"{network}-{GCLOUD_CONSTS.CLOUDNAT_ROUTER_PREFIX}-{region}"
        # NAT every subnet range, auto-allocate external IPs, STANDARD tier.
        cloudNAT.source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
        cloudNAT.nat_ip_allocate_option = "AUTO_ONLY"
        cloudNAT.auto_network_tier = "STANDARD"
        updatedRouter.nats = [cloudNAT]

        wait(client.patch(project=project_id, region=region, router=router, router_resource=updatedRouter))

        global rollback_fns
        client_here = client
        region_here = region
        router_here = router
        # NOTE(review): this rollback deletes the *router* itself rather than
        # only the NAT config added above.  If the router pre-existed (it was
        # not created in this run), rollback would remove it too — confirm
        # this is intended.
        remove_fn = lambda client=client_here, region=region_here, router=router_here: wait(client.delete(project=project_id, region=region, router=router))
        rollback_fns += [ [ f"Deleting cloud NAT '{cloudNAT.name}'", remove_fn ] ]

    except Exception as e:
        raise WrapInvokerException(e)

def checkDefaultRoute(project: str, network: str) -> bool:
    """Return True when 'network' has any route with a next-hop gateway set."""
    network_full = GCLOUD_CONSTS.GNETWORK_TEMPLATE.format(project, network)
    routes = compute_v1.RoutesClient().list(project=project)
    return any(r.next_hop_gateway and r.network == network_full for r in routes)

def createDefaultRoute(project: str, network: str):
    """Create the 0.0.0.0/0 default route for 'network' via the project's
    default internet gateway, registering a rollback entry on success.

    Raises WrapInvokerException on any API failure.
    """
    route = Route()
    route.name = f"{network}-{GCLOUD_CONSTS.DEFAULT_ROUTE_SUFFIX}"
    route.description = "Default route for AROVA network"
    route.dest_range = "0.0.0.0/0"
    route.network = GCLOUD_CONSTS.GNETWORK_TEMPLATE.format(project, network)
    route.next_hop_gateway = GCLOUD_CONSTS.PROJECT_DEFAULT_GATEWAY.format(project)

    try:
        client = compute_v1.RoutesClient()
        wait(client.insert(project=project, route_resource=route))

        global rollback_fns
        # CONSISTENCY FIX: bind every value as a lambda default (early
        # binding), matching the rollback pattern used by the sibling
        # create* helpers.  The original bound 'route' as a default but then
        # referenced the closed-over locals in the body, defeating the idiom.
        remove_fn = lambda client=client, project=project, route=route.name: wait(
                client.delete(project=project, route=route))
        rollback_fns += [ [ f"Deleting default route '{route.name}'", remove_fn ] ]
    except Exception as e:
        raise WrapInvokerException(e)

def getADCDiskByName(name: str, region: str) -> Disk:
    """Fetch a regional disk by name.

    Returns None when the disk does not exist or when the lookup fails for
    any other reason (the error is printed, not raised).
    """
    client = compute_v1.RegionDisksClient()
    try:
        return client.get(project=project_id, region=region, disk=name)
    except NotFound:
        return None
    except Exception as e:
        print("Exception finding disk: proj {}, region {}, name {}".format(project_id, region, name))
        print(e)
        return None

def getADCDiskName(sregion: str, dregion: str) -> str:
    """Build the canonical ADC disk name for a source/destination region pair."""
    return "-".join((GCLOUD_CONSTS.DISK_NAME_ADC_PREFIX, sregion, dregion))

def getADCDisk(sregion: str, dregion: str) -> Disk:
    """Look up the ADC disk for this region pair; it lives in the source region."""
    return getADCDiskByName(getADCDiskName(sregion, dregion), sregion)

def createADCDisk(
        sregion: str,
        szone1: str,
        szone2: str,
        dregion: str,
        dzone1: str,
        dzone2: str,
        isPrimary: bool,
        primaryDisk: Disk,
        kms_key: CryptoKey,
        sa: str) -> Disk:
    """Create the regional ADC disk and register a rollback for it.

    Primary disks replicate across the two source zones; secondary disks use
    the destination zones and are linked to 'primaryDisk' for async
    replication.  When 'kms_key' is given the disk is CMEK-encrypted with the
    service account 'sa'.  Returns the created disk as reported by the API.
    Raises WrapInvokerException on any failure.
    """
    try:
        client = compute_v1.RegionDisksClient()

        zone_a, zone_b = (szone1, szone2) if isPrimary else (dzone1, dzone2)

        disk = Disk()
        disk.name = getADCDiskName(sregion, dregion)
        disk.replica_zones = [
            GCLOUD_CONSTS.REPLICA_ZONES_TEMPLATE.format(project_id, zone_a),
            GCLOUD_CONSTS.REPLICA_ZONES_TEMPLATE.format(project_id, zone_b),
        ]
        disk.size_gb = GCLOUD_CONSTS.DISK_SIZE_ADC_GB
        disk.type_ = GCLOUD_CONSTS.DISK_TYPE_REGIONAL_PD_BALANCED_TEMPLATE.format(project_id, sregion)
        # The label value can only contain lowercase letters, numeric characters,
        # underscores and dashes.
        # The label value can be at most 63 characters long. International characters are allowed.
        disk.labels = {
            GCLOUD_CONSTS.DISK_LABEL_SRC_REGION:  sregion,
            GCLOUD_CONSTS.DISK_LABEL_DEST_REGION: dregion,
            GCLOUD_CONSTS.DISK_LABEL_SRC_ZONE1:   szone1,
            GCLOUD_CONSTS.DISK_LABEL_SRC_ZONE2:   szone2,
            GCLOUD_CONSTS.DISK_LABEL_DEST_ZONE1:  dzone1,
            GCLOUD_CONSTS.DISK_LABEL_DEST_ZONE2:  dzone2,
            GCLOUD_CONSTS.DISK_LABEL_AROVA_DISK:  "true",
            GCLOUD_CONSTS.DISK_LABEL_IS_PRIMARY:  str(isPrimary).lower(),
        }
        if not isPrimary:
            # Secondary disks point back at the primary they replicate from.
            disk.async_primary_disk.disk = primaryDisk.self_link

        if kms_key:
            disk.disk_encryption_key = CustomerEncryptionKey()
            disk.disk_encryption_key.kms_key_name = kms_key.name
            disk.disk_encryption_key.kms_key_service_account = sa

        wait(client.insert(project=project_id, region=sregion, disk_resource=disk))

        global rollback_fns
        remove_fn = lambda client=client, disk=disk.name, region=sregion: wait(
                client.delete(project=project_id, region=region, disk=disk))
        rollback_fns += [ [ f"Deleting disk '{disk.name}'", remove_fn ] ]

        # Return the disk as the API now reports it.
        return getADCDisk(sregion, dregion)
    except Exception as e:
        raise WrapInvokerException(e)

def deleteDiskByName(project: str, name: str, region: str) -> None:
    """Delete the named regional disk and wait for the operation to finish."""
    wait(compute_v1.RegionDisksClient().delete(project=project, region=region, disk=name))

def startReplication(primaryDisk, secondaryDisk, primaryRegion):
    """Begin async replication from 'primaryDisk' to 'secondaryDisk'.

    Raises WrapInvokerException on any API failure.
    """
    try:
        request = RegionDisksStartAsyncReplicationRequest(
            async_secondary_disk=secondaryDisk.self_link)
        client = compute_v1.RegionDisksClient()
        wait(client.start_async_replication(
                project=project_id, region=primaryRegion, disk=primaryDisk.name,
                region_disks_start_async_replication_request_resource=request))
    except Exception as e:
        raise WrapInvokerException(e)

def stopReplication(disk: str, region: str) -> None:
    """Stop async replication for 'disk' in 'region'.

    Raises WrapInvokerException on any API failure.
    """
    try:
        wait(compute_v1.RegionDisksClient().stop_async_replication(
                project=project_id, region=region, disk=disk))
    except Exception as e:
        raise WrapInvokerException(e)

# Secondary disk replication status can be accessed through .async_primary_disk member.
def getAsyncReplStatusForSecondaryDisk(diskName: str, region: str) -> Optional[AsyncReplState]:
    """Return the secondary disk's replication state, or None when the disk
    or its replication status is unavailable."""
    disk = getADCDiskByName(diskName, region)
    if disk and disk.resource_status and disk.resource_status.async_primary_disk:
        # Final conversion to the 'blessed' enum type.
        return AsyncReplState[disk.resource_status.async_primary_disk.state]
    return None

# Actual primary disk replication status can be accessed through one of
# .async_secondary_disks states.
def getAsyncReplStatusForPrimaryDisk(adc_disk_name: str, region: str) -> Optional[tuple[str, AsyncReplState]]:
    """Return (secondary disk URL, replication state) for the first secondary
    disk that reports a status, or None when nothing is available."""
    disk = getADCDiskByName(adc_disk_name, region)
    if not disk or not disk.resource_status or not disk.resource_status.async_secondary_disks:
        return None

    for disk_name, status in disk.resource_status.async_secondary_disks.items():
        if not status:
            continue
        # Normalize to a full URL; the map key may be a relative resource path.
        full_name = disk_name if disk_name.startswith("https://") \
                else "https://www.googleapis.com/compute/v1/" + disk_name
        # Final conversion to the 'blessed' enum type.
        return full_name, AsyncReplState[status.state]
    return None

def waitDiskStatus(disk_name: str, region: str, expected_state: AsyncReplState) -> bool:
    """Poll the secondary disk's replication state until it reaches
    'expected_state', checking up to 5 more times at 10-second intervals.

    Returns True when the expected state is observed, False when the state
    cannot be read or the retries are exhausted.
    """
    try:
        state = getAsyncReplStatusForSecondaryDisk(disk_name, region)
        if not state:
            return False

        print("Waiting", end="", flush=True)
        remaining = 5
        while remaining > 0 and state != expected_state:
            print(".", end="", flush=True)
            time.sleep(10)
            remaining -= 1
            state = getAsyncReplStatusForSecondaryDisk(disk_name, region)
            if not state:
                return False
    except Exception as e:
        print(e)
        state = None

    print("", flush=True)
    return state == expected_state

def getRegionsInfo(disk: Disk) -> Optional[tuple[str, str, bool]]:
    """Extract (src_region, dest_region, is_primary) from an ADC disk's labels.

    Returns None for a missing disk, a non-AROVA disk (fewer labels than an
    AROVA disk carries), or when any of the three required labels is absent.
    (The original comment promised an empty tuple for non-AROVA disks, but the
    function has always returned None in that case.)
    """
    if not disk or len(disk.labels) < 7:
        return None

    # Direct map lookups replace the original match-based scan over all labels.
    srcRegion = disk.labels.get(GCLOUD_CONSTS.DISK_LABEL_SRC_REGION)
    destRegion = disk.labels.get(GCLOUD_CONSTS.DISK_LABEL_DEST_REGION)
    primaryLabel = disk.labels.get(GCLOUD_CONSTS.DISK_LABEL_IS_PRIMARY)
    # isPrimary stays None (-> overall None result) when the label is absent.
    isPrimary = None if primaryLabel is None else primaryLabel.lower() == "true"

    if srcRegion and destRegion and isPrimary is not None:
        return srcRegion, destRegion, isPrimary

    return None

def isRegionLocation(location_id: str) -> bool:
    """Return True when 'location_id' names a compute region of the project.

    Non-region locations make the API reject the request: a malformed name
    yields BadRequest, while a well-formed but nonexistent region yields
    NotFound.  The original caught only BadRequest, letting NotFound (e.g.
    for the KMS 'global' location) escape to the caller — both now map to
    False.
    """
    try:
        client = compute_v1.RegionsClient()
        client.get(project=project_id, region=location_id)
        return True
    except (BadRequest, NotFound):
        return False

def getKMSKey(key_name: str) -> Optional[CryptoKey]:
    """Fetch the KMS key with full resource name 'key_name'.

    Only symmetric ENCRYPT_DECRYPT keys qualify; anything else, or a missing
    key, yields None.
    """
    client = kms_v1.KeyManagementServiceClient()
    try:
        kms_key = client.get_crypto_key(name=key_name)
    except NotFound:
        return None
    if kms_key and kms_key.purpose == CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT:
        return kms_key
    return None

def findKMSKey(location_id: str, key_ring: str, key: str) -> Optional[CryptoKey]:
    """Locate a KMS key in 'location_id'.

    When 'key_ring' is given, look the key up directly by its fully-qualified
    name.  Otherwise enumerate every key ring in the location and recurse into
    each until the key is found.  Returns None when nothing matches.
    """
    client = kms_v1.KeyManagementServiceClient()

    if key_ring:
        fullname = GCLOUD_CONSTS.KMS_KEY_NAME_TEMPLATE.format(
            project=project_id,
            location=location_id,
            key_ring_name=key_ring,
            key_name=key
        )
        return getKMSKey(fullname)

    location_path = GCLOUD_CONSTS.KMS_LOCATION_NAME_TEMPLATE.format(
        project=project_id,
        location=location_id
    )
    # Ring names come back as full resource paths; keep only the last segment.
    ring_names = [ring.name.rsplit("/", 1)[-1]
                  for ring in client.list_key_rings(parent=location_path)]

    for ring_name in ring_names:
        found = findKMSKey(location_id, ring_name, key)
        if found:
            return found

    return None

def getVmByName(name: str, zone: str) -> Optional[Instance]:
    """Fetch the instance named 'name' in 'zone'.

    Returns None when the VM does not exist or the lookup fails (the error is
    printed, not raised).
    """
    client = compute_v1.InstancesClient()
    try:
        return client.get(project=project_id, zone=zone, instance=name)
    except NotFound:
        return None
    except Exception as e:
        print(e)
        return None


# VMs would have the same name even if deployed in the remote region,
# so use_dest_region allows to find a VM in a 'remote' region.
def findAROVAVM(project: str, sregion: str, dregion: str, vm_prefix: str,
                use_dest_region: bool = False) -> tuple[Optional[Instance], Optional[str]]:
    """Search every zone of the chosen region for an AROVA VM of this
    source/destination region pair.

    Returns (vm, None) when a labelled AROVA VM is found, (None, message)
    when a same-named non-AROVA VM occupies the slot or an error occurred,
    and (None, None) when nothing was found at all.
    """
    try:
        rzclient = compute_v1.RegionZonesClient()
        zones = [ z.name for z in rzclient.list(project=project,
                region=dregion if use_dest_region else sregion) ]

        for zone in zones:
            client = compute_v1.InstancesClient()
            # Filter by the AROVA labels for this exact region pair.
            req = ListInstancesRequest()
            req.project = project
            req.zone = zone
            req.filter = GCLOUD_CONSTS.AROVA_VM_SEARCH_CRITERIA.format(
                vm_label = GCLOUD_CONSTS.AROVA_VM_LABEL,
                src_region_label = GCLOUD_CONSTS.AROVA_VM_LABEL_SRC_REGION,
                src_region = sregion,
                dest_region_label = GCLOUD_CONSTS.AROVA_VM_LABEL_DEST_REGION,
                dest_region = dregion
            )
            # NOTE(review): list() returns a pager; '.items' reads the first
            # page's repeated field — confirm this is the intended access
            # pattern (later pages are never examined).
            vms = client.list(request=req)
            if len(vms.items) > 0:
                return vms.items[0], None

            # No labelled VM: check whether a foreign VM already uses the name.
            vm = getVmByName(generateVmName(sregion, dregion, vm_prefix), zone)
            if vm:
                return None, f"Non-AROMA VM '{vm.name}' found in '{zone}'."
    except Exception as e:
        print(e)
        return None, str(e)

    return None, None

def getBootDiskImage(osFamily: str, version: str) -> Image:
    """Resolve the latest image of family '{osFamily}-{version}' from the
    public '{osFamily}-cloud' project; None when absent or on error."""
    client = compute_v1.ImagesClient()
    try:
        return client.get_from_family(project=f"{osFamily}-cloud", family=f"{osFamily}-{version}")
    except NotFound:
        return None
    except Exception as e:
        print(e)
        return None

def isVMReady(name, zone):
    """Wait until the VM answers over SSH (through the IAP tunnel).

    Polls every 10 seconds for at most 5 minutes and requires two successful
    checks in a row.  Returns True when the VM is reachable, False on timeout.
    """
    # When VM is created script must wait while cloud finalizes IAP tunnelling.
    attempts = 5 * (60 // 10) # Wait 5 minutes, check VM accessibility (through SSH) each 10 second.
    success = 0               # Expect 2 successful operations in a row
    print("Waiting for VM to come up", end="", flush=True)
    while attempts > 0 and success < 2:
        try:
            execCommand = prepareGcloudCommand(name, zone, AROVAVM_CONSTS.CMD_CHECK_VM)
            runCommand(execCommand)
            success += 1
            print("+", end="", flush=True)
        except Exception:
            print(".", end="", flush=True)
            success = 0
        # BUG FIX: the attempt counter was never decremented, so an
        # unreachable VM made this loop spin forever instead of timing out.
        attempts -= 1
        time.sleep(10)

    print("", flush=True)

    return success >= 2

# NB! In case of exception, rollback_fns will be used for cleanup.
def createVM(
        zone: str,
        sregion: str,
        dregion: str,
        bootDiskImage: Image,
        ADCDisk: Disk,
        network: Network,
        sa: str,
        vm_prefix: str,
        kms_key: CryptoKey):
    """Create the AROVA VM in 'zone' with a boot disk plus the ADC data disk.

    The VM runs under service account 'sa' with cloud-platform scope and is
    labelled so findAROVAVM can locate it by region pair.  A rollback entry
    deleting the VM is registered after successful creation.

    Raises:
        WrapInvokerException: when the insert operation fails.
        InvalidNetworkException: when a custom-mode network lacks a subnet
            in the deployment region.
    """
    client = compute_v1.InstancesClient()

    # Boot disk: created from the image, deleted together with the VM.
    sda = AttachedDisk()
    sda.boot = True
    sda.auto_delete = True
    sda.disk_size_gb = GCLOUD_CONSTS.DISK_SIZE_BOOT_GB
    sda.mode = "READ_WRITE"
    sda.initialize_params = AttachedDiskInitializeParams()
    sda.initialize_params.source_image = bootDiskImage.self_link
    sda.initialize_params.disk_type = GCLOUD_CONSTS.DISK_TYPE_ZONE_PD_PALANCED_TEMPLATE.format(project_id, zone)
    if kms_key:
        # CMEK-encrypt the boot disk with the user's key.
        sda.disk_encryption_key = CustomerEncryptionKey()
        sda.disk_encryption_key.kms_key_name = kms_key.name
        sda.disk_encryption_key.kms_key_service_account = sa

    # Data disk: the pre-created ADC disk, kept when the VM is deleted.
    sdb = AttachedDisk()
    sdb.boot = False
    sdb.auto_delete = False
    sdb.mode = "READ_WRITE"
    sdb.source = ADCDisk.self_link

    instance = Instance()
    instance.name = generateVmName(sregion, dregion, vm_prefix)
    # customize e2-medium VM to use 8 Gb (8192=8*1024)
    # https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#python_3
    instance.machine_type = GCLOUD_CONSTS.MACHINE_TYPE_TEMPLATE.format(project_id, zone, "e2-custom-medium-8192")
    instance.disks = [ sda, sdb ]

    instance.network_interfaces = [ NetworkInterface() ]
    if network.auto_create_subnetworks:
        # Auto-mode network: attaching by network is enough.
        instance.network_interfaces[0].network = network.self_link
    else:
        # Custom-mode network: an explicit subnetwork in this region is required.
        deploy_region = getRegion(zone, zone)
        subnets = [subnetUrl for subnetUrl in network.subnetworks if parseSubnetUrl(subnetUrl)[0] == deploy_region]
        if not subnets:
            raise InvalidNetworkException(f"Subnet missing in region {deploy_region} network {network.name}")
        instance.network_interfaces[0].subnetwork = subnets[0]

    instance.service_accounts = [ ServiceAccount() ]
    instance.service_accounts[0].email = sa
    instance.service_accounts[0].scopes = [ "https://www.googleapis.com/auth/cloud-platform" ]

    # Labels let findAROVAVM identify this VM and its region pair later.
    instance.labels = {
        GCLOUD_CONSTS.AROVA_VM_LABEL            : "true",
        GCLOUD_CONSTS.AROVA_VM_LABEL_SRC_REGION : sregion,
        GCLOUD_CONSTS.AROVA_VM_LABEL_DEST_REGION: dregion }
    try:
        wait(client.insert(project=project_id, zone=zone, instance_resource=instance))
    except Exception as e:
        raise WrapInvokerException(e)

    print(f"Done deploying VM {instance.name}")
    global rollback_fns
    client_here = client
    zone_here = zone
    name_here = instance.name
    remove_fn = lambda client=client_here, zone=zone_here, name=name_here: wait(client.delete(project=project_id, zone=zone, instance=name))
    rollback_fns += [ [ f"Deleting VM '{instance.name}'", remove_fn ] ]

def writeCmdToLog(description: str, execCommand: str | list[str], stdout: str) -> None:
    """Append a command, its timestamp and its output to the execution log.

    Best-effort: logging failures are reported but never raised.
    """
    try:
        with open(AROVAVM_CONSTS.EXEC_LOG, "at") as log:
            print(description, file=log)
            print(datetime.datetime.now(), file=log)
            if isinstance(execCommand, list):
                print(" ".join(execCommand), file=log)
            else:
                print(execCommand, file=log)
            print("--------", file=log)
            if stdout:
                print(stdout, file=log)
                print("--------", file=log)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort behavior but
        # only for ordinary exceptions.
        print(f"Failed to write to the log file {AROVAVM_CONSTS.EXEC_LOG}.")

def runCommand(execCommand: str | list[str], shell=False, stdinData=None, description=None) -> None:
    """Run an external command, printing progress dots while it works.

    Args:
        execCommand: the command line, as a list (preferred) or shell string.
        shell: forwarded to Popen.
        stdinData: optional text written to the child's stdin before waiting.
        description: when set, it is printed up front, progress dots are
            shown, and the command with its output is appended to the log.

    Raises:
        ExternalProcessException: when the child exits with a non-zero code.
    """
    stdout = ""
    if description:
        print(description)
    with Popen(execCommand, shell=shell, stderr=STDOUT, stdout=PIPE,
            stdin=PIPE if stdinData else None, text=True) as process:
        if stdinData:
            process.stdin.write(stdinData)
        dots_in_line = 0
        while True:
            try:
                # Wait in 3-second slices so a progress dot can be printed
                # between timeouts; on success stdout holds the full output.
                stdout, _ = process.communicate(timeout=3)
            except TimeoutExpired:
                if description:
                    dots_in_line += 1
                    print(".", end=limit_dots(dots_in_line), flush=True)
            # NOTE(review): communicate() is re-invoked after each timeout;
            # stdinData is only written once above — confirm children that
            # read stdin slowly still receive all of it.
            if process.poll() is not None:
                break
        process.wait()
        if process.returncode != 0:
            raise ExternalProcessException(execCommand, process.returncode, stdout)
        if description:
            print("Done.")
            writeCmdToLog(description, execCommand, stdout)

def getBundle() -> str:
    """Download and validate the AROVA bundle; return its version string.

    Raises WrapInvokerException when the version file cannot be read and
    DownloadFailedException when it is empty.
    """
    removeBundle()
    download_steps = (
        (AROVAVM_CONSTS.CMD_GET_BUNDLE_VERSION, "Download bundle version."),
        (AROVAVM_CONSTS.CMD_DOWNLOAD_BUNDLE,    "Download bundle."),
        (AROVAVM_CONSTS.CMD_GET_BUNDLE_SHA256,  "Download bundle checksums."),
        (AROVAVM_CONSTS.CMD_CHECK_BUNDLE,       "Validate bundle."),
    )
    for cmd, what in download_steps:
        runCommand(cmd, description=what)

    try:
        with open(AROVAVM_CONSTS.BUNDLE_VERSION_NAME, 'r') as version_file:
            bundle_version = version_file.read().rstrip('\n')
    except Exception as e:
        raise WrapInvokerException(e)

    if not bundle_version:
        raise DownloadFailedException("Version file is empty")
    return bundle_version


def uploadBundle(vm, zone):
    """Copy the downloaded bundle into /tmp on the target VM via gcloud scp."""
    execCommand = AROVAVM_CONSTS.CMD_SCP + [
        f"--zone={zone}",
        f"--project={project_id}",
        AROVAVM_CONSTS.BUNDLE_NAME,
        f"{vm}:/tmp/{AROVAVM_CONSTS.BUNDLE_NAME}",
    ]
    runCommand(execCommand, description=f"Upload bundle to {vm}.")

def removeBundle() -> None:
    """Delete any locally downloaded bundle artifacts.

    Directories are left alone; only regular files are removed.
    """
    for local_file in AROVAVM_CONSTS.ALL_BUNDLE_NAMES:
        # os.path.isfile already implies existence, so the separate
        # os.path.exists check was redundant.
        if os.path.isfile(local_file):
            os.remove(local_file)

def prepareGcloudCommand(vm, zone, cmd):
    """Build a gcloud SSH command line that runs 'cmd' on 'vm' through IAP."""
    ssh_options = ["--tunnel-through-iap", f"--zone={zone}", f"--project={project_id}"]
    return AROVAVM_CONSTS.CMD_SSH + [vm] + ssh_options + cmd

def runRemoteBundleToDeploy(
        vm_name: str,
        src_pri_zone: str,
        adc_disk_name: str,
        adc_disk_id: str,
        dest_pri_zone: str,
        dest_sec_zone: str,
        subscription_id: str,
        kms_key: CryptoKey,
        adminPasswd: str,
        bundle_version: str) -> None:
    """Unpack and run the uploaded AROVA bundle on the VM for a fresh deploy.

    Executes four SSH steps on 'vm_name': create the versioned working
    directory, extract the bundle, run the installer (fed 'adminPasswd' on
    stdin), and apply the deploy-time configuration for the ADC disk,
    destination zones, subscription and optional KMS key.
    """
    # Both zone arguments are the same zone; getRegion just derives the
    # region prefix (and validates the zone name format).
    src_region  = getRegion(src_pri_zone, src_pri_zone)
    dest_region = getRegion(dest_pri_zone, dest_sec_zone)

    createCmd = AROVAVM_CONSTS.CMD_CREATE_TMP_DIR.format(version=bundle_version)
    execCommand = prepareGcloudCommand(vm_name, src_pri_zone, [createCmd])
    runCommand(execCommand, description=f"Create working directory on {vm_name}.")

    execCommand = prepareGcloudCommand(vm_name, src_pri_zone, AROVAVM_CONSTS.CMD_EXTRACT_BUNDLE)
    runCommand(execCommand, description=f"Unpack bundle on {vm_name}.")

    # The admin password is piped to the remote process via stdin, never
    # placed on the command line.
    execCommand = prepareGcloudCommand(vm_name, src_pri_zone, AROVAVM_CONSTS.CMD_RUN_AROVA_TO_DEPLOY)
    runCommand(execCommand, stdinData=adminPasswd + "\n", description="Run AROVA.")

    setupCmd = AROVAVM_CONSTS.CMD_SETUP_AROVA_TO_DEPLOY.format(
        src_reg=src_region,
        disk_id=adc_disk_id,
        disk_name=adc_disk_name,
        dest_region=dest_region,
        dest_zone_pri=dest_pri_zone,
        dest_zone_sec=dest_sec_zone,
        subscription_id=subscription_id,
        kms_key_name=kms_key.name if kms_key else ""
    )
    execCommand = prepareGcloudCommand(vm_name, src_pri_zone, [setupCmd])
    runCommand(execCommand, stdinData=adminPasswd + "\n", description="Config AROVA.")

def runRemoteBundleToRecovery(
        vm_name: str,
        src_pri_zone: str,
        dest_pri_zone: str,
        dest_sec_zone: str,
        adc_disk_name: str,
        adc_disk_id: str,
        src_region: str,
        is_fbfo: bool,
        kms_key: CryptoKey,
        adminPasswd: str,
        bundle_version: str) -> None:
    """Unpack and run the uploaded AROVA bundle on the VM for recovery.

    Mirrors runRemoteBundleToDeploy but applies recovery configuration:
    'is_fbfo' selects failback/failover mode versus plain redeploy.
    The admin password is supplied to the remote processes via stdin.
    """
    createCmd = AROVAVM_CONSTS.CMD_CREATE_TMP_DIR.format(version=bundle_version)
    cmdLine = prepareGcloudCommand(vm_name, src_pri_zone, [createCmd])
    runCommand(cmdLine, description=f"Create working directory on {vm_name}.")

    cmdLine = prepareGcloudCommand(vm_name, src_pri_zone, AROVAVM_CONSTS.CMD_EXTRACT_BUNDLE)
    runCommand(cmdLine, description=f"Unpack bundle on {vm_name}.")

    cmdLine = prepareGcloudCommand(vm_name, src_pri_zone, AROVAVM_CONSTS.CMD_RUN_AROVA_TO_RECOVERY)
    runCommand(cmdLine, stdinData=adminPasswd + "\n", description="Run AROVA.")

    print("FB/FO" if is_fbfo else "Redeploy")
    setupCmd = AROVAVM_CONSTS.CMD_SETUP_AROVA_TO_RECOVERY.format(
        src_reg=src_region,
        disk_id=adc_disk_id,
        disk_name=adc_disk_name,
        operation_type="fbfo" if is_fbfo else "redeploy",
        dest_zone_pri=dest_pri_zone,
        dest_zone_sec=dest_sec_zone,
        kms_key_name=kms_key.name if kms_key else ""
    )
    cmdLine = prepareGcloudCommand(vm_name, src_pri_zone, [setupCmd])
    runCommand(cmdLine, stdinData=adminPasswd + "\n", description="Config AROVA.")

def getRegion(pri_zone: str, sec_zone: str) -> str:
    """Derive the region shared by two zones (zone minus its last '-suffix').

    Raises:
        BadZoneException: when a zone name has no '-' separator.
        NoCommonRegionException: when the two zones belong to different regions.
    """
    def _region_of(zone: str) -> str:
        # A zone like 'us-east1-b' maps to region 'us-east1'.
        region, sep, _ = zone.rpartition("-")
        if not sep:
            raise BadZoneException(f"Unknown zone '{zone}'", zone)
        return region

    region1 = _region_of(pri_zone)
    region2 = _region_of(sec_zone)

    if region1 != region2:
        # MESSAGE FIX: sec_zone is now quoted like pri_zone.
        raise NoCommonRegionException(f"'{pri_zone}' and '{sec_zone}' are not in one region.", pri_zone, sec_zone)

    return region1

def isZoneAccessible(zone: str) -> bool:
    """Return True when 'zone' exists in the project and its status is UP.

    Any lookup error is printed and treated as inaccessible.
    """
    try:
        client = compute_v1.ZonesClient()
        # IDIOM FIX: the original assigned the API result back to 'zone',
        # shadowing the string parameter; use a distinct name.
        zone_info = client.get(project=project_id, zone=zone)
        return Zone.Status[zone_info.status] == Zone.Status.UP
    except Exception as e:
        print(e)
        return False

def checkUserNetwork(network_name: str, regions: list[str]) -> bool:
    """Verify a user-supplied network has everything AROVA needs.

    Checks, in order: the network exists, it has subnets in every region of
    'regions', a NAT router with Cloud NAT exists in the first region, and a
    default route is present.  Prints the first missing piece and returns
    False; returns True when everything checks out.
    """
    network = getNetwork(network_name)
    if network is None:
        print(f"User-specified network '{network_name}' not found.")
        return False

    regions_without_subnet = checkAndCreateSubnets(network, regions.copy(), create=False)
    if regions_without_subnet:
        print(f"Network {network_name} does not have subnets in {','.join(regions_without_subnet)}")
        return False

    nat_router_name = getNATRouterName(network_name, regions[0])
    if not nat_router_name:
        print("Router not found.")
        return False

    if not checkCloudNAT(regions[0], nat_router_name):
        print("Cloud NAT not found.")
        return False

    if not checkDefaultRoute(project_id, network_name):
        print(f"Default route for {network_name} not found.")
        return False

    print(f"Manually specified network '{network_name}' verified to contain sufficient routing in region {regions[0]}")
    return True


def createNetworkInfrastructure(network_name: str, regions: list[str], sa: str) -> Network:
    """Ensure the default AROVA network and all its plumbing exist.

    Creates (only if missing): the network itself, subnets in every region of
    'regions', the AROVA access firewall rule for service account 'sa', a NAT
    router plus Cloud NAT in the first (source) region, and the default
    route.  Returns the up-to-date Network object.
    """
    srcRegion = regions[0]
    network = getNetwork(network_name)
    if network:
        print(f"Default network '{network_name}' found.")
    else:
        print(f"Creating default network '{network_name}'.")
        network = createNetwork(network_name)

    checkAndCreateSubnets(network, regions, create=True)
    # Update network configuration object.
    network = getNetwork(network_name)

    # Firewall rule may be present, present-but-disabled, or absent.
    firewallState = checkAccessFirewallRule(network_name, sa)
    if firewallState == FirewallState.ENABLED:
        print("Firewall rule for AROVA access is active.")
    elif firewallState == FirewallState.DISABLED:
        print("Correcting the existing AROVA access firewall rule.")
        enableFirewallAccessRule(network_name, sa)
    else:
        print("Firewall rule for AROVA access is absent. Creating it.")
        createAccessFirewallRule(network_name, sa)

    routerName = getNATRouterName(network_name, srcRegion)
    if routerName:
        print(f"Router {routerName} found.")
    else:
        print("No router found. Create it.")
        routerName = createNATRouter(network_name, srcRegion)

    if checkCloudNAT(srcRegion, routerName):
        print("Cloud NAT found.")
    else:
        print("Creating Cloud NAT.")
        createCloudNAT(network_name, srcRegion, routerName)

    if checkDefaultRoute(project_id, network_name):
        print(f"Default route for {network_name} found.")
    else:
        print(f"Creating default route for {network_name}.")
        createDefaultRoute(project_id, network_name)

    return network

def installSoftwareAtDeploy(vm_name: str,
                   src_pri_zone: str,
                   adc_disk: Disk,
                   dest_pri_zone: str,
                   dest_sec_zone: str,
                   subscription_id: str,
                   kms_key: CryptoKey,
                   adminPasswd: str):
    """Install AROVA on a freshly deployed VM.

    Refreshes SSH keys, waits for the VM to accept SSH, then downloads,
    uploads and runs the bundle with deploy-time configuration.
    """
    runCommand(AROVAVM_CONSTS.CMD_UPDATE_SSH_KEYS + [ f"--project={project_id}"],
        description="Updating SSH keys on the new VM...")
    if not isVMReady(vm_name, src_pri_zone):
        print(f"VM '{vm_name}' is not accessible via SSH. Stop deploy.")
        # BUG FIX: the message announced a stop, but execution previously
        # fell through and attempted the installation anyway.
        return

    vers = getBundle()
    print(f"AROVA version: {vers}")
    uploadBundle(vm_name, src_pri_zone)
    removeBundle()
    runRemoteBundleToDeploy(vm_name, src_pri_zone, adc_disk.name, adc_disk.id,
        dest_pri_zone, dest_sec_zone, subscription_id, kms_key, adminPasswd, vers)

def installSoftwareAtRecovery(vm_name: str,
                     zone: str,
                     src_region: str,
                     adc_disk: Disk,
                     is_fbfo: bool,
                     kms_key: CryptoKey,
                     adminPasswd: str) -> None:
    """Install AROVA on a recovery VM.

    Refreshes SSH keys, waits for SSH, uploads the bundle and runs it with
    recovery configuration.  The recovery destination zones come from the ADC
    disk's labels: for a primary disk they are the dest-zone labels, for a
    secondary disk the (original) src-zone labels.
    """
    runCommand(AROVAVM_CONSTS.CMD_UPDATE_SSH_KEYS + [ f"--project={project_id}"],
        description="Updating SSH keys on the new VM...")
    if not isVMReady(vm_name, zone):
        print(f"VM '{vm_name}' is not accessible via SSH. Stop deploy.")
        # BUG FIX: the message announced a stop, but execution previously
        # fell through and attempted the installation anyway.
        return

    vers = getBundle()
    print(f"AROVA version: {vers}")
    uploadBundle(vm_name, zone)
    removeBundle()

    dest_pri_zone = None
    dest_sec_zone = None
    if adc_disk.labels[GCLOUD_CONSTS.DISK_LABEL_IS_PRIMARY] and adc_disk.labels[GCLOUD_CONSTS.DISK_LABEL_IS_PRIMARY].lower() == "true":
        dest_pri_zone = adc_disk.labels[GCLOUD_CONSTS.DISK_LABEL_DEST_ZONE1]
        dest_sec_zone = adc_disk.labels[GCLOUD_CONSTS.DISK_LABEL_DEST_ZONE2]
    else:
        dest_pri_zone = adc_disk.labels[GCLOUD_CONSTS.DISK_LABEL_SRC_ZONE1]
        dest_sec_zone = adc_disk.labels[GCLOUD_CONSTS.DISK_LABEL_SRC_ZONE2]

    runRemoteBundleToRecovery(vm_name, zone, dest_pri_zone, dest_sec_zone, adc_disk.name, adc_disk.id,
        src_region, is_fbfo, kms_key, adminPasswd, vers)

def generateVmName(sregion: str, dregion: str, prefix: str) -> str:
    """Compose the AROVA VM name; a non-empty 'prefix' is prepended."""
    base = f"{GCLOUD_CONSTS.VM_NAME_PREFIX}-{sregion}-{dregion}"
    return f"{prefix}-{base}" if prefix else base

def checkSubscription(subscriptionID: str, sregion: str, dregion: str) -> bool:
    """Ask ARO-MS whether 'subscriptionID' is valid for this region pair.

    Returns True on HTTP 200; otherwise prints a diagnostic and returns False.
    """
    query = {
        "subUUID" : subscriptionID,
        "arovaId" : AROVAVM_CONSTS.AROVA_ID_TO_CHECK,
        "sourceRegion": sregion,
        "destinationRegion": dregion
    }

    response = requests.get(AROVAVM_CONSTS.URL_CHECK_SUBSCRIPTION, params=query)
    status = response.status_code
    if status == 200:
        return True

    if status == 401:
        print(f"ERROR: No customer with subscription id ('{subscriptionID}') or account is closed or AROVA disabled or no current billing plan.")
    elif status == 500:
        print("ERROR: Internal ARO-MS server error.")
    else:
        print(f"ERROR: Unexpected status code ({status}).")

    return False

def checkStrength(passwd: str) -> bool:
    """Check password strength.

    Requires at least 8 characters, no control characters (tab, CR, LF,
    backspace), and characters from at least 3 of these 5 categories:
    uppercase, lowercase, numeric, punctuation symbols, anything else.
    """
    if len(passwd) < 8:
        return False

    categories = set()
    for ch in passwd:
        if ch in "\t\r\n\b":
            return False
        if ch.isupper():
            categories.add("upper")
        elif ch.islower():
            categories.add("lower")
        elif ch.isnumeric():
            categories.add("numeric")
        elif ch in "\\\" '`:;~!@#$%^&*/+-_=()[]{}<>|,.?":
            categories.add("symbol")
        else:
            categories.add("other")

    return len(categories) >= 3

def getPass() -> str:
    """Prompt for and confirm a new password for AROVA's 'admin' account.

    The user gets three attempts in total; each rejected entry — whether too
    weak or mismatched on confirmation — consumes one attempt.

    Raises:
        BadPasswordException: when all three attempts fail.
    """
    for _ in range(3):
        adminPasswd = getpass.getpass(prompt="Enter a new password for AROVA’s user account 'admin':").strip()
        # BUG FIX: a weak password previously retried without incrementing
        # the attempt counter, allowing an unbounded loop; every failed
        # entry now counts against the limit.
        if not checkStrength(adminPasswd):
            print("Password is expected to be at least 8 chars and have 3 of: Upper/Lower/Numeric/Symbols\n")
            continue
        adminPasswdCopy = getpass.getpass(prompt="Retype password for AROVA’s user account 'admin':").strip()
        if adminPasswd == adminPasswdCopy:
            return adminPasswd
        print("Entered passwords are not equal. Please re-enter.")

    raise BadPasswordException()

def cmd_deploy(
        src_region: Optional[str],
        src_pri_zone: str,
        src_sec_zone: str,
        dest_region: Optional[str],
        dest_pri_zone: str,
        dest_sec_zone: str,
        svc_account: str,
        subscriptionId: str,
        network_name: str,
        vm_prefix: str,
        location_id: str,
        key_ring: str,
        key_name: str) -> None:
    """Deploy a new AROVA VM together with its replicated (ACD) data disk.

    Validates the zone/region combination and the subscription, creates or
    reuses the network infrastructure, creates the primary ACD data disk
    and the VM, installs the AROVA software, and finally prints the IAP
    tunnel command for reaching the UI.  All precondition failures abort
    with a printed error message (no exception).

    :param src_region: expected source region, or None to derive it from the zones.
    :param src_pri_zone: primary source zone (hosts the VM and disk primary replica).
    :param src_sec_zone: secondary source zone (disk replica).
    :param dest_region: expected destination region, or None to derive it from the zones.
    :param dest_pri_zone: primary destination zone.
    :param dest_sec_zone: secondary destination zone.
    :param svc_account: service-account email the VM will run as.
    :param subscriptionId: AROVA subscription id, verified against the ARO-MS service.
    :param network_name: existing network to use; when empty the AROVA network is created.
    :param vm_prefix: optional prefix for the generated VM name.
    :param location_id: KMS key location id (empty when not using CMEK).
    :param key_ring: KMS key ring name (may be empty).
    :param key_name: KMS key name (empty when not using CMEK).
    """
    remotecheck()

    if not find_svc_account(project_id, svc_account):
        print(f"Service account {svc_account} is not found in project {project_id}.")
        return

    # The zones determine the real regions; explicit region args only cross-check.
    srcRegion = getRegion(src_pri_zone, src_sec_zone)
    if src_region is not None and src_region != srcRegion:
        print(f"ERROR: Source zones ('{src_pri_zone}' and '{src_sec_zone}') are not in region '{src_region}'")
        return

    destRegion = getRegion(dest_pri_zone, dest_sec_zone)
    if dest_region is not None and dest_region != destRegion:
        print(f"ERROR: Destination zones ('{dest_pri_zone}' and '{dest_sec_zone}') are not in region '{dest_region}'")
        return

    # Cross-region async replication requires two distinct regions.
    if srcRegion == destRegion:
        print(f"ERROR: Source ({srcRegion}) and destination ({destRegion}) regions are same.")
        return

    if network_name:
        if not checkUserNetwork(network_name, [srcRegion, destRegion]):
            return
        network = getNetwork(network_name)
    else:
        network = createNetworkInfrastructure(GCLOUD_CONSTS.NETWORK_NAME, [srcRegion, destRegion], svc_account)

    # Optional customer-managed encryption key (CMEK) for disks.
    kms_key = None
    if location_id or key_name:
        kms_key = findKMSKey(location_id, key_ring, key_name)
        if not kms_key:
            if key_ring:
                print(f"ERROR: KMS Key '{key_name}' in '{key_ring}' not found in '{location_id}' location.")
            else:
                print(f"ERROR: KMS Key '{key_name}' not found in '{location_id}' location.")

            return

    if not checkSubscription(subscriptionId, srcRegion, destRegion):
        return

    # Refuse to deploy over an existing primary data disk for this region pair.
    primaryADCDisk = getADCDisk(srcRegion, destRegion)
    if primaryADCDisk:
        print(f"ERROR: primary data disk from {srcRegion} to {destRegion} found: {primaryADCDisk.name}")
        return

    #secondaryADCDisk = getADCDisk(destRegion, srcRegion)
    #if secondaryADCDisk:
    #    print(f"ERROR: data disk (secondary) from {destRegion} to {srcRegion} found.")
    #    return

    # Refuse to deploy when an AROVA VM already exists for this region pair.
    (vm, err) = findAROVAVM(project_id, srcRegion, destRegion, vm_prefix)
    if err:
        print(err)
        return

    if vm:
        print(f"ERROR: {vm.name} found.")
        return

    # Ask for the admin password before creating any cloud resources.
    adminPasswd = getPass()

    print(f"No data disk (primary) from {srcRegion} to {destRegion} found. Creating it.")
    primaryADCDisk = createADCDisk(
        srcRegion,
        src_pri_zone,
        src_sec_zone,
        destRegion,
        dest_pri_zone,
        dest_sec_zone,
        True,
        None,
        kms_key,
        svc_account)

    # print(f"No data disk (secondary) from {destRegion} to {srcRegion} found. Creating it.")
    # secondaryADCDisk = createADCDisk(destRegion, dest_pri_zone, dest_sec_zone, srcRegion, False, primaryADCDisk)

    # if primaryADCDisk.async_secondary_disks and len(primaryADCDisk.async_secondary_disks) != 0:
    #    print("Secondary data disk attached.")
    # else:
    #    print("Secondary disk is not assigned. Adding it to primary.")
    #    startReplication(primaryADCDisk, secondaryADCDisk, srcRegion)

    print("No AROVA VM found. Creating it.")
    bootDiskImage = getBootDiskImage("almalinux", "9")
    if bootDiskImage is None:
        print("Cannot find image for boot disk. Stop.")
        return

    createVM(src_pri_zone, srcRegion, destRegion, bootDiskImage, primaryADCDisk, network, svc_account, vm_prefix, kms_key)

    instance_name = generateVmName(srcRegion, destRegion, vm_prefix)
    installSoftwareAtDeploy(
        instance_name,
        src_pri_zone,
        primaryADCDisk,
        dest_pri_zone,
        dest_sec_zone,
        subscriptionId,
        kms_key,
        adminPasswd)

    print("Use the following command to enable IAP tunnel to AROVA UI:")
    print(AROVAVM_CONSTS.CMD_IAP_ACCESS.format(zone=src_pri_zone, instance=instance_name, project=project_id))

def cmd_recovery(
        src_region: str,
        src_zone: str,
        ADCDiskName: str,
        svc_account: str,
        force_stop_replication: bool,
        network_name: str,
        vm_prefix: str) -> None:
    """Recover an AROVA VM in *src_region*/*src_zone* from an existing ACD disk.

    Determines the recovery mode (FB/FO vs. Redeploy) from the disk's
    async-replication status, optionally force-detaches the disk from a
    holding VM and deletes stale AROVA VMs, creates or reuses the network
    infrastructure, then creates the VM and installs the AROVA software.
    Precondition failures abort with a printed error message.

    :param src_region: region to recover into.
    :param src_zone: zone to recover into (must lie in src_region).
    :param ADCDiskName: name of the ACD data disk to recover from.
    :param svc_account: service-account email the VM will run as.
    :param force_stop_replication: allow detach/VM-deletion/stop-replication steps.
    :param network_name: existing network to use; when empty the AROVA network is created.
    :param vm_prefix: optional prefix for the generated VM name.
    """
    remotecheck()

    if not find_svc_account(project_id, svc_account):
        print(f"Service account {svc_account} is not found in project {project_id}.")
        return

    if src_zone and src_region != getRegion(src_zone, src_zone):
        print(f"ERROR: Zone '{src_zone}' is not in '{src_region}' region.")
        return

    disk = getADCDiskByName(ADCDiskName, src_region)
    if not disk:
        print(f"ERROR: No disk ('{ADCDiskName}') found in '{src_region}' region.")
        return

    # If the disk is attached to a VM, we can only continue when forced.
    detach_info = None
    delete_info = []
    if disk.users:
        index = disk.users[0].rfind("/")
        if index == -1:
            print(f"ERROR: Disk '{ADCDiskName}' is used - impossible to continue.")
            return
        instance_name = disk.users[0][index + 1:]
        print(f"ERROR: Disk '{ADCDiskName}' is used by '{instance_name}' instance.")
        if not force_stop_replication:
            return
        print(f'Forced operation: detaching disk from {instance_name}')
        zones = []
        for replica_zone in disk.replica_zones:
            project, zone = parseZoneUrlToParameters(replica_zone)
            zones += [ zone ]
        detach_info = [ instance_name, disk.name, zones ]

    regionsInfo = getRegionsInfo(disk)
    if not regionsInfo:
        print(f"ERROR: Disk '{ADCDiskName}' is not AROVA disk.")
        return

    (srcRegion, destRegion, isPrimary) = regionsInfo

    # Find an AROVA VM in the destination region.
    (vm, err) = findAROVAVM(project_id, srcRegion, destRegion, vm_prefix, True)
    if err:
        print(err)
        return

    if vm:
        print(f"ERROR: inverse {vm.name} found.")
        if not force_stop_replication:
            return
        print(f'VM {vm.name} deletion forced.')
        delete_info += [ vm ]

    # Find an AROVA VM in the source region.
    (vm, err) = findAROVAVM(project_id, srcRegion, destRegion, vm_prefix)
    if err:
        print(err)
        return

    if vm:
        print(f"ERROR: {vm.name} found.")
        if not force_stop_replication:
            return
        print(f'VM {vm.name} deletion forced.')
        delete_info += [vm]

    # Ask for the admin password before mutating any cloud resources.
    adminPasswd = getPass()

    is_fbfo = False
    fbfo_reason = None
    if detach_info:
        vm_name, detach_disk, zones = detach_info

        # The holding VM lives in one of the disk's replica zones.
        vm = None
        zone = None
        for zone in zones:
            vm = getVmByName(vm_name, zone)
            if vm:
                break
        if not vm:
            print(f"Failed to find instance {vm_name} holding disk {detach_disk}.")
            return
        print(f'Detaching disk {detach_disk} from VM {vm_name} in {zone}')
        vm_disk_device_name = ""
        for vm_disk in vm.disks:
            project, region, disk_name = parseDiskUrlToParameters(vm_disk.source)
            if disk_name == detach_disk:
                vm_disk_device_name = vm_disk.device_name
                break

        detach_disk_from_vm(vm, vm_disk_device_name)

    if delete_info:
        for vm in delete_info:
            print(f'Deleting VM {vm.name}')
            client = compute_v1.InstancesClient()
            project, zone = parseZoneUrlToParameters(vm.zone)
            try:
                wait(client.delete(project=project, zone=zone, instance=vm.name))
            except Exception as e:
                raise WrapInvokerException(e)

    # Decide the recovery mode from the disk's replication state.
    if disk.resource_status and disk.resource_status.async_primary_disk:
        state = AsyncReplState[disk.resource_status.async_primary_disk.state]
        if state == AsyncReplState.STOPPED:
            is_fbfo = True
            fbfo_reason = "Recovering using secondary ACD disk (no replication to other disk). Mode: FB/FO"
            if disk.resource_status.async_secondary_disks:
                for sec_d, sec_s in disk.resource_status.async_secondary_disks.items():
                    sec_state = AsyncReplState[sec_s.state]
                    if sec_state == AsyncReplState.ACTIVE or sec_state == AsyncReplState.CREATED or \
                            sec_state == AsyncReplState.STARTING:
                        is_fbfo = False
                        fbfo_reason = "Recovering using secondary ACD disk (replication to other disk found). Mode: Redeploy"
                        break
        elif state == AsyncReplState.STOPPING:
            is_fbfo = True
            fbfo_reason = "Recovering using secondary ACD disk. Mode: FB/FO"
            print("Replication is stopping. Waiting.")
            if not waitDiskStatus(ADCDiskName, src_region, AsyncReplState.STOPPED):
                disk = getADCDiskByName(ADCDiskName, src_region)
                print(f"ERROR: Replication status is {disk.resource_status.async_primary_disk.state}.")
                return
        elif state == AsyncReplState.ACTIVE or state == AsyncReplState.CREATED or \
                state == AsyncReplState.STARTING:
            is_fbfo = True
            fbfo_reason = "Recovering using secondary ACD disk (force stop replication). Mode: FB/FO"
            print("Replication is active. Stop replication.")
            stopReplication(ADCDiskName, src_region)
            print("Replication is stopping. Waiting.")
            if not waitDiskStatus(ADCDiskName, src_region, AsyncReplState.STOPPED):
                disk = getADCDiskByName(ADCDiskName, src_region)
                print(f"ERROR: Replication status is {disk.resource_status.async_primary_disk.state}.")
                return
        elif state == AsyncReplState.STATE_UNSPECIFIED or \
                state == AsyncReplState.UNDEFINED_STATE:
            if force_stop_replication:
                is_fbfo = True
                fbfo_reason = "Recovering using secondary ACD disk (replication status is undefined). Mode: FB/FO"
                print("Replication status is unknown. Stop replication.")
                stopReplication(ADCDiskName, src_region)
                if not waitDiskStatus(ADCDiskName, src_region, AsyncReplState.STOPPED):
                    disk = getADCDiskByName(ADCDiskName, src_region)
                    print(f"ERROR: Replication status is {disk.resource_status.async_primary_disk.state}.")
                    return
            else:
                print("Replication status is unknown. Stop recovery. Start AROVA CLI with '--force-stop-replication' to recovery.")
                return
        else:
            print(f"ERROR: Unknown replication status {disk.resource_status.async_primary_disk.state}.")
            return
    else:
        is_fbfo = True
        fbfo_reason = "Recovering using primary ACD disk. Mode: FB/FO"
        if disk.resource_status.async_secondary_disks:
            for sec_d, sec_s in disk.resource_status.async_secondary_disks.items():
                sec_state = AsyncReplState[sec_s.state]
                if sec_state == AsyncReplState.ACTIVE or sec_state == AsyncReplState.CREATED or \
                        sec_state == AsyncReplState.STARTING:
                    is_fbfo = False
                    fbfo_reason = "Recovering using primary ACD disk (replication to other disk found). Mode: Redeploy"
                    break

    if fbfo_reason:
        print(fbfo_reason)
    else:
        print("Unexpected path taken. Mode: Redeploy.")

    # Re-fetch the disk: detach/stop-replication above may have changed it.
    disk = getADCDiskByName(ADCDiskName, src_region)

    kms_key = None
    if disk.disk_encryption_key and disk.disk_encryption_key.kms_key_name:
        key_name = disk.disk_encryption_key.kms_key_name
        # Strip the version suffix: the disk references a cryptoKeyVersion,
        # but the lookup needs the key resource itself.
        # BUG FIX: was str.index(), which raises ValueError when the
        # substring is absent and made the '!= -1' check unreachable.
        index = key_name.find("/cryptoKeyVersions")
        if index != -1:
            key_name = key_name[:index]

        kms_key = getKMSKey(key_name)
        # BUG FIX: was 'if not key_name', but key_name is always truthy
        # here; the lookup result is what must be validated.
        if not kms_key:
            print(f"ERROR: Cloud KMS Key '{key_name}' not found.")
            return

    if network_name:
        if not checkUserNetwork(network_name, [src_region]):
            return
        network = getNetwork(network_name)
    else:
        network = createNetworkInfrastructure(GCLOUD_CONSTS.NETWORK_NAME, [src_region], svc_account)

    if len([z for z in disk.replica_zones if z.endswith(src_zone)]) < 1:
        print(f"ERROR: Disk '{ADCDiskName}' isn't replicated into '{src_zone}' zone.")
        return
    else:
        if not isZoneAccessible(src_zone):
            print(f"ERROR: {src_zone} is not active.")
            return

    print("No AROVA VM found. Creating it.")
    bootDiskImage = getBootDiskImage("almalinux", "9")
    if bootDiskImage is None:
        print("Cannot find image for boot disk. Stop.")
        return

    createVM(src_zone, srcRegion, destRegion, bootDiskImage, disk, network, svc_account, vm_prefix, kms_key)

    instance_name = generateVmName(srcRegion, destRegion, vm_prefix)
    installSoftwareAtRecovery(
        instance_name,
        src_zone,
        src_region,
        disk,
        is_fbfo,
        kms_key,
        adminPasswd)

    print("Use the following command to enable IAP tunnel to AROVA UI:")
    print(AROVAVM_CONSTS.CMD_IAP_ACCESS.format(zone=src_zone, instance=instance_name, project=project_id))


def detach_disk_from_vm(vm: Instance, vm_disk_device_name: str):
    """Detach the disk with device name *vm_disk_device_name* from *vm*
    and block until the operation completes."""
    project, zone = parseZoneUrlToParameters(vm.zone)
    detach_request = compute_v1.DetachDiskInstanceRequest(
        device_name=vm_disk_device_name,
        instance=vm.name,
        project=project,
        zone=zone,
    )
    operation = compute_v1.InstancesClient().detach_disk(request=detach_request)
    wait(operation)


def find_project(proj_name: str) -> Optional[Project]:
    """
    Look up an ACTIVE project by project ID or display name.

    Project IDs match [a-z][a-z0-9-]*[a-z0-9], while display names may also
    contain uppercase letters, spaces, single quotes and exclamation marks.
    Quote/exclamation characters confuse the search API (it may raise or
    return nothing), so when the input looks like a display name those
    characters are replaced with '.' in the query and the candidates are
    then matched case-insensitively against the original name ('.' only
    ever stands in for '-', '!' or a single quote here, not a generic
    wildcard).  Per
    https://cloud.google.com/resource-manager/reference/rest/v3/projects/search
    the search itself is case-independent.

    :param proj_name: a string containing project ID or project display name.
    :return: project object or None
    """
    client = resourcemanager_v3.ProjectsClient()
    looks_like_display_name = any(c in "'! " for c in proj_name)
    if looks_like_display_name:
        sanitized = proj_name.replace("'", ".").replace("!",".")
        query = f'name="{sanitized}"'
    else:
        query = f'projectId="{proj_name}" OR name="{proj_name}"'

    request = resourcemanager_v3.SearchProjectsRequest({'query': query})
    for candidate in client.search_projects(request=request):
        if candidate.state != Project.State.ACTIVE:
            continue
        if not looks_like_display_name or candidate.display_name.lower() == proj_name.lower():
            return candidate
    return None

def find_organization(project: Project) -> Optional[Organization]:
    """Walk the folder hierarchy upwards from *project* and return its
    Organization, or None when the project sits outside any organization."""
    node = project
    if not node.parent:
        # Nothing above this project at all.
        return None
    folders_client = resourcemanager_v3.FoldersClient()
    # Climb through folders until the parent is an organization resource.
    while "organization" not in node.parent:
        get_folder = resourcemanager_v3.GetFolderRequest({'name': node.parent})
        node = folders_client.get_folder(request=get_folder)
        if not node.parent:
            # Project / folder is outside any organization.
            return None
    get_org = resourcemanager_v3.GetOrganizationRequest({'name': node.parent})
    return resourcemanager_v3.OrganizationsClient().get_organization(get_org)

def find_svc_account(project: str, sa_email: str) -> Optional[iamServiceAccount]:
    """Return the service account *sa_email* in *project*, or None if absent.

    (Return annotation fixed: the function returns None on NotFound, so it
    is Optional, not a plain iamServiceAccount.)

    :param project: project id hosting the account.
    :param sa_email: full service-account email address.
    """
    sa_full_name = f"projects/{project}/serviceAccounts/{sa_email}"

    iam_client = iam_admin_v1.IAMClient()
    request = iam_admin_v1.GetServiceAccountRequest({'name': sa_full_name})
    try:
        return iam_client.get_service_account(request = request)
    except NotFound:
        # Absence is an expected outcome here, not an error.
        return None

def find_or_create_svc_account(project: Project, sa_email: str) -> iamServiceAccount:
    """Fetch the service account *sa_email* from *project*, creating it
    (with the standard AROVA display name and description) when missing."""
    existing = find_svc_account(project.project_id, sa_email)
    if existing:
        return existing
    print(f"Service account {sa_email} is not found, attempting to create")

    # Build and submit the creation request.
    account = iamServiceAccount({
        'display_name': "AROVA Service Account",
        'description': "Account providing permissions to deployed AROVA instances"
    })
    create_request = iam_admin_v1.CreateServiceAccountRequest({
        'name': f"projects/{project.project_id}",
        'account_id': sa_email.split('@')[0],
        'service_account': account
    })
    return iam_admin_v1.IAMClient().create_service_account(create_request)


def validate_role(role: Role, is_org: bool = False):
    """Ensure *role* contains every permission AROVA needs, adding any
    missing ones via an UpdateRole call.

    :param role: the role to inspect (its included_permissions are extended in place).
    :param is_org: also require the organization-level permission set.
    :raises WrapInvokerException: when the role update fails.
    """
    # BUG FIX: the checklist was bound directly to
    # GCLOUD_CONSTS.PERMISSIONS_LIST and then extended with '+=', which
    # (for a list constant) appended the org permissions to the shared
    # constant itself, polluting every later call.  Copy first.
    checklist = list(GCLOUD_CONSTS.PERMISSIONS_LIST)
    if is_org:
        checklist += GCLOUD_CONSTS.ORG_PERMISSIONS_LIST

    missing_perms = [perm for perm in checklist
                     if perm not in role.included_permissions]

    if missing_perms:
        print(f"Permissions missing in role {role.title}, trying to add...")
        try:
            iam_client = iam_admin_v1.IAMClient()
            role.included_permissions += missing_perms
            update_request = iam_admin_v1.UpdateRoleRequest({
                'name': role.name,
                'role': role
            })
            iam_client.update_role(update_request)
        except Exception as e:
            raise WrapInvokerException(e)

# Service account names must match [a-zA-Z][a-zA-Z\d\-]*[a-zA-Z\d].
# A value containing '@' is treated as a ready email and passed through.
def sanitize_sa_name(name: str) -> str:
    if '@' in name:
        return name
    # Replace every character outside [a-zA-Z0-9-] with a dash.
    cleaned = "".join(sym if sym.isalnum() or sym == '-' else '-' for sym in name)
    # First character must be a letter, last must be alphanumeric;
    # substitute 'R' in place of an offending edge character.
    if not cleaned[0].isalpha():
        cleaned = 'R' + cleaned[1:]
    if not cleaned[-1].isalnum():
        cleaned = cleaned[:-1] + 'R'
    return cleaned

# Role ids must match [a-zA-Z0-9_\.]{3,64}; an "ARO" prefix pads
# too-short inputs and the result is truncated to 64 characters.
def sanitize_role_name(name: str) -> str:
    prefix = "ARO" if len(name) < 3 else ""
    # Replace every character outside [a-zA-Z0-9_.] with a dot.
    body = "".join(sym if sym.isalnum() or sym in "_." else "." for sym in name)
    return (prefix + body)[:64]

def find_or_create_role(parent: Organization | Project | None, role_title: str) -> Optional[Role]:
    """Fetch the AROVA role under *parent*, creating it when missing.

    :param parent: Organization, Project, or None (look up a global role only).
    :param role_title: human-readable title; its sanitized form is the role id.
    :return: the role, or None when not found and *parent* is None.
    """
    iam_client = iam_admin_v1.IAMClient()
    prefix = ''
    if parent:
        # org roles use numeric id (org name), project roles use project id
        if type(parent) is Organization:
            prefix = parent.name
        else:
            prefix = f"projects/{parent.project_id}"
    request = iam_admin_v1.GetRoleRequest({
        'name':f"{prefix + '/' if prefix else ''}roles/{sanitize_role_name(role_title)}"
    })
    try:
        arova_role = iam_client.get_role(request)
        if arova_role.stage != Role.RoleLaunchStage.GA:
            print(f"\tWarning: unexpected Role stage {arova_role.stage.name}; GA expected")
        return arova_role
    except NotFound:
        if not parent:
            return None
        print(f"Role '{role_title}' is not found under {parent.display_name}, attempting to create")
    # Continue creating role under org/project...
    # BUG FIX: perm_list was bound directly to GCLOUD_CONSTS.PERMISSIONS_LIST
    # and extended with '+=', which (for a list constant) appended the org
    # permissions to the shared constant itself, so later project-level
    # roles silently inherited org permissions.  Copy first.
    perm_list = list(GCLOUD_CONSTS.PERMISSIONS_LIST)
    if type(parent) is Organization:
        perm_list += GCLOUD_CONSTS.ORG_PERMISSIONS_LIST
    new_role = Role({
        'title': role_title,
        'description': "Permissions required for AROVA instance to perform management and orchestration",
        'stage': Role.RoleLaunchStage.GA,
        'included_permissions': perm_list
    })
    request = iam_admin_v1.CreateRoleRequest({
        'parent': prefix,
        'role_id': sanitize_role_name(role_title),
        'role': new_role
    })
    return iam_client.create_role(request)

# NB! Policy has dynamically defined type, so cannot be used in type annotations.
def get_project_policy(project: Project):
    """Fetch the current IAM policy attached to *project*."""
    get_request = iam_policy_pb2.GetIamPolicyRequest(resource=project.name)
    client = resourcemanager_v3.ProjectsClient()
    return client.get_iam_policy(request=get_request)

def verify_or_add_role(policy, role: Role, sa_email: str):
    """Ensure *policy* grants *role* to service account *sa_email*.

    Mutates the policy in place; a no-op when the grant already exists.
    Only the first binding matching the role is considered.
    """
    expected_member = f"serviceAccount:{sa_email}"
    binding = next((b for b in policy.bindings if b.role == role.name), None)
    if binding is None:
        # Role not bound at all - add a fresh binding for the account.
        policy.bindings.add(
            role=role.name,
            members=[
                expected_member
            ]
        )
    elif expected_member not in binding.members:
        binding.members.append(expected_member)

def update_project_policy(project: Project, policy):
    """Write *policy* back as the IAM policy of *project*."""
    set_request = iam_policy_pb2.SetIamPolicyRequest(
        resource=project.name,
        policy=policy
    )
    client = resourcemanager_v3.ProjectsClient()
    client.set_iam_policy(request=set_request)

def cmd_prep(deploy_project_name: str, svc_account: str, role_title: str, org_role: bool,
             project_names: list):
    """Prepare IAM for an AROVA deployment.

    Resolves the deployment project, ensures the AROVA role exists (a
    single organization-level role when *org_role* is set, otherwise one
    role per project), ensures the service account exists, and grants the
    role to the account in every project from *project_names* plus the
    deployment project itself.

    :param deploy_project_name: project ID or display name of the project AROVA deploys into.
    :param svc_account: service-account email to find/create and grant.
    :param role_title: role title; its sanitized form is used as the role id.
    :param org_role: prefer one organization-level role over per-project roles.
    :param project_names: projects to grant access in (mutated: the deploy
        project is appended when not already listed).
    """
    remotecheck()

    # Fetch destination project.
    deploy_project = find_project(deploy_project_name)

    if not deploy_project:
        print(f"Project {deploy_project_name} not found.")
        return
    print(f"Destination project {deploy_project.display_name} [{deploy_project.name}]")

    arova_role = None
    if org_role:
        # Fetch parent organization
        top_org = find_organization(deploy_project)
        if not top_org:
            # Fall back to per-project roles (arova_role stays None).
            print("Warning: project does not belong to any organization, will create roles for projects.")
        else:
            print(f"Organization {top_org.display_name} [{top_org.name}]")

            arova_role = find_or_create_role(top_org, role_title)
            if not arova_role:
                print(f"Role '{role_title}' is not found. Please create one, or request per-project roles.")
                return
            print(f"Using role '{arova_role.title}' / '{arova_role.name}'")
            validate_role(arova_role, True)
    else:
        print("Will create roles per project.")

    svc_acc = find_or_create_svc_account(deploy_project, svc_account)
    print(f"Using service account {svc_acc.email}")

    # The deployment project itself always receives the grant.
    if deploy_project_name not in project_names:
        project_names += [ deploy_project_name ]

    # Assign role to service account in provided projects.
    for project in project_names:
        dest_project = find_project(project)
        if not dest_project:
            print(f"Warning: project {project} not found.")
            continue
        try:
            policy = get_project_policy(dest_project)
            # Without an org-level role, each project gets its own role.
            used_role = arova_role
            if not used_role:
                used_role = find_or_create_role(dest_project, role_title)
                validate_role(used_role)
            verify_or_add_role(policy, used_role, svc_account)
            update_project_policy(dest_project, policy)
            print(f"Updated access policy for project {dest_project.display_name} [{dest_project.name}]")
        except Exception as x:
            # Best-effort: report the failure and move on to the next project.
            print(f"Failed to update policy when assigning role {arova_role.name if arova_role else role_title} " +
                  f"to project {dest_project.display_name} [{dest_project.name}]")
            print(x)
            continue

def eval_for_clean(vm: Instance, expected_region: str, keep_acd: bool, force_del: bool):
    """Inspect the disks of *vm* and work out what cleanup must do.

    :param vm: the AROVA VM instance being removed.
    :param expected_region: region the VM's non-boot disks are expected in.
    :param keep_acd: preserve an actively-replicating ACD disk pair.
    :param force_del: allow deletion even while replication is active.
    :return: tuple (disks_to_delete, delete_remote, stop_repl) - disk source
        URLs to delete, remote replica disk names to delete, and disk source
        URLs whose replication must be stopped first.
    :raises ArovaCleanupError: when keep_acd is requested but no valid ACD
        disk exists, or replication is active and deletion is not forced.
    """
    acd_disk = ""
    disks_to_delete = []
    delete_remote = []
    stop_repl = []

    for disk in vm.disks:
        project, region, disk_name = parseDiskUrlToParameters(disk.source)
        if disk.auto_delete:
            # VM deletion will remove this disk; nothing for us to do.
            print(f"Disk {disk_name} will be auto-deleted")
            continue
        if not disk.boot:
            if region != expected_region:
                print(f"Warning: unexpected disk {disk_name} source region {region} (expected {expected_region}).")
            status_tuple = getAsyncReplStatusForPrimaryDisk(disk_name, expected_region)
            if status_tuple:
                rmt_disk_name, status = status_tuple
                if status == AsyncReplState.ACTIVE or status == AsyncReplState.STARTING:
                    if keep_acd:
                        print(f"Keeping active ACD disk")
                        # Actual zone names are last parts of the zone URL.
                        # ACD disk is guaranteed to have 2.
                        disk = getADCDiskByName(disk_name, region)
                        print(f"    {disk_name} active zones: {disk.replica_zones[0].split('/')[-1]}, {disk.replica_zones[1].split('/')[-1]}")
                        for secondary in disk.async_secondary_disks:
                            disk_full_fqdn = disk.async_secondary_disks[secondary].async_replication_disk.disk
                            proj_2, reg_2, name_2 = parseDiskUrlToParameters(disk_full_fqdn)
                            disk_2 = getADCDiskByName(name_2, reg_2)
                            print(f"    {name_2} active zones: {disk_2.replica_zones[0].split('/')[-1]}, {disk_2.replica_zones[1].split('/')[-1]}")
                        acd_disk = disk_name
                        # avoid adding the disk to the deletion list
                        continue
                    else:
                        if not force_del:
                            # BUG FIX: this was a bare 'return' (None), which
                            # made callers crash when unpacking the 3-tuple.
                            # Raise the cleanup error explicitly instead.
                            raise ArovaCleanupError(f"Unexpected ACD disk state: {status.name}")
                        print(f"Forcing ACD deletion, adding remote disk {rmt_disk_name}.")
                        delete_remote += [ rmt_disk_name ]
                        stop_repl += [ disk.source ]
            else:
                status = AsyncReplState.STATE_UNSPECIFIED
            print(f"{disk_name} -> {status.name}")
        else:
            print(f"Warning: Boot disk {disk_name} will be force deleted")
        disks_to_delete += [ disk.source ]
    if keep_acd and not acd_disk:
        raise ArovaCleanupError(f"VM {vm.name} does not appear to have a valid ACD disk")
    return disks_to_delete, delete_remote, stop_repl

def cmd_clean(project: str, src_region: str, dest_region: str, vm_prefix: str,
              force_del: bool, keep_acd: bool, dry_run: bool):
    """Delete the AROVA VM(s) and their disks for a src/dest region pair.

    Looks for the secondary AROVA VM (in *dest_region*) and the primary one
    (in *src_region*), collects the disks to delete and replications to
    stop, prints the plan, and - unless *dry_run* - deletes the VMs, stops
    replication, and finally deletes the disks.

    :param project: project id hosting the VMs.
    :param src_region: source region of the deployment.
    :param dest_region: destination region of the deployment.
    :param vm_prefix: optional VM-name prefix used when searching.
    :param force_del: force deletion even with active replication.
    :param keep_acd: keep an actively replicating ACD disk pair.
    :param dry_run: print the plan only, perform no operation.
    """
    remotecheck()

    disks_to_delete = []
    delete_remote = []
    delete_vms = []
    stop_repl = []

    # Secondary VM lives in the destination region (last arg True).
    vm, errmsg = findAROVAVM(project, src_region, dest_region, vm_prefix, True)
    if not vm:
        has_name_prefix = f"and name prefix {vm_prefix} " if vm_prefix else ''
        print(f"AROVA VM with dest region {dest_region} {has_name_prefix}not found in dest region {dest_region}")
    else:
        print(f"Secondary AROVA VM {vm.name} found in {dest_region}")
        disks_to_delete_p, delete_remote_p, stop_repl_p = eval_for_clean(vm, dest_region, keep_acd, force_del)
        disks_to_delete += disks_to_delete_p
        delete_remote += delete_remote_p
        stop_repl += stop_repl_p
        delete_vms += [ vm ]

    # Primary VM lives in the source region.
    vm, errmsg = findAROVAVM(project, src_region, dest_region, vm_prefix)
    if not vm:
        has_name_prefix = f"and name prefix {vm_prefix} " if vm_prefix else ''
        print(f"AROVA VM with dest region {dest_region} {has_name_prefix}not found in source region {src_region}")
    else:
        print(f"AROVA VM {vm.name} found in {src_region}")
        disks_to_delete_p, delete_remote_p, stop_repl_p = eval_for_clean(vm, src_region, keep_acd, force_del)
        disks_to_delete += disks_to_delete_p
        delete_remote += delete_remote_p
        stop_repl += stop_repl_p
        delete_vms += [vm]

    if not delete_vms:
        # No VMs found...
        return

    print("---- Operations:")
    for vm in delete_vms:
        print("- Delete VM " + vm.name)
    if stop_repl:
        print("- Stop replication for disks: " + ", ".join(stop_repl))
    print("- Delete disks: " + ", ".join(delete_remote + disks_to_delete))
    if dry_run:
        print("Dry run requested, no operation performed.")
        return

    # VMs must go first so their disks become detached/deletable.
    for vm in delete_vms:
        try:
            client = compute_v1.InstancesClient()
            # NOTE(review): this rebinds the 'project' parameter to the
            # project parsed from the VM's zone URL.
            project, zone = parseZoneUrlToParameters(vm.zone)
            wait(client.delete(project = project, zone = zone, instance = vm.name))
        except Exception as e:
            print(f"Warning: failed to delete VM {vm.name}. Error {e}. No further actions possible.")
            return

    # Replication has to be fully stopped before the disks can be deleted.
    for disk in stop_repl:
        project, region, disk_name = parseDiskUrlToParameters(disk)
        stopReplication(disk_name, region)
        if not waitDiskStatus(disk_name, region, AsyncReplState.STOPPED):
            status = getAsyncReplStatusForSecondaryDisk(disk_name, region)
            if status:
                print(f"Warning: Replication status is {status.name}.")

    # Best-effort disk deletion: report failures and keep going.
    for disk in delete_remote + disks_to_delete:
        try:
            project, region, disk_name = parseDiskUrlToParameters(disk)
            deleteDiskByName(project, disk_name, region)
        except Exception as e:
            print(f"Warning: failed to delete disk {disk} - {e}")
    print("Cleanup done.")


# Construct service account email id
def get_sa_email(deploy_project_id: str, sa_prefix: str) -> str:
    """Return *sa_prefix* unchanged when it already is an email address,
    otherwise build '<sanitized-prefix>@<project>.iam.gserviceaccount.com'."""
    if '@' in sa_prefix:
        return sa_prefix
    return f"{sanitize_sa_name(sa_prefix)}@{deploy_project_id}.iam.gserviceaccount.com"


def exec_deploy(cmdLine, extra_args):
    """CLI entry point for the 'deploy' command.

    Validates KMS options and zone names against the project, then invokes
    cmd_deploy with the parsed arguments.

    :param cmdLine: parsed argparse namespace for the deploy subcommand.
    :param extra_args: leftover command-line tokens; any content is an error.
    :raises IllegalArgumentError: on unexpected options or unknown zones.
    """
    if extra_args:
        raise IllegalArgumentError(f"Unexpected commandline options provided: {' '.join(extra_args)}")
    # check src_region matches src_pri/sec_zone
    # check dest_region matches dest_pri/sec_zone

    # KMS location and key must be given together, and the key must be
    # multi-region (a region-scoped location is rejected).
    if cmdLine.kms_location and not cmdLine.kms_key:
        print("ERROR: KMS key is not specified.")
        return

    if cmdLine.kms_key and not cmdLine.kms_location:
        print("ERROR: Location of KMS key is not specified.")
        return

    if cmdLine.kms_location and isRegionLocation(cmdLine.kms_location):
        print("ERROR: KMS key is not multi-region.")
        return

    # Validate provided zone names.
    zones = getZoneNames(project_id)
    if cmdLine.src_pri_zone not in zones:
        raise IllegalArgumentError(f"Primary source zone {cmdLine.src_pri_zone} missing in project {project_id}")
    if cmdLine.src_sec_zone not in zones:
        raise IllegalArgumentError(f"Secondary source zone {cmdLine.src_sec_zone} missing in project {project_id}")
    if cmdLine.dest_pri_zone not in zones:
        raise IllegalArgumentError(f"Primary destination zone {cmdLine.dest_pri_zone} missing in project {project_id}")
    if cmdLine.dest_sec_zone not in zones:
        raise IllegalArgumentError(f"Secondary destination zone {cmdLine.dest_sec_zone} missing in project {project_id}")

    cmd_deploy(
        cmdLine.src_region,
        cmdLine.src_pri_zone,
        cmdLine.src_sec_zone,
        cmdLine.dest_region,
        cmdLine.dest_pri_zone,
        cmdLine.dest_sec_zone,
        get_sa_email(project_id, cmdLine.sa),
        cmdLine.subscription_id,
        cmdLine.network,
        cmdLine.vm_prefix,
        cmdLine.kms_location,
        cmdLine.kms_key_ring,
        cmdLine.kms_key)

def exec_recovery(cmdLine, extra_args):
    """Validate 'recovery' subcommand arguments and invoke cmd_recovery().

    When --aro-disk-name is absent the disk name is derived from the source
    and destination regions, which therefore must BOTH be present.

    Raises:
        IllegalArgumentError: on leftover options or a missing disk name /
            region pair.
    """
    if extra_args:
        raise IllegalArgumentError(f"Unexpected commandline options provided: {' '.join(extra_args)}")
    if not cmdLine.aro_disk_name:
        # BUGFIX: deriving the disk name needs BOTH regions.  The previous
        # check (`not src and not dest`) fired only when both were missing,
        # letting a single missing region reach getADCDiskName() as None.
        if not (cmdLine.src_region and cmdLine.dest_region):
            raise IllegalArgumentError(
                "Either --aro-disk-name or both --src-region and --dest-region must be provided!")
        cmdLine.aro_disk_name = getADCDiskName(cmdLine.src_region, cmdLine.dest_region)

    # Don't need to validate zones, as disk must exist.

    cmd_recovery(
        getRegion(cmdLine.src_pri_zone, cmdLine.src_pri_zone),
        cmdLine.src_pri_zone,
        cmdLine.aro_disk_name,
        get_sa_email(project_id, cmdLine.sa),
        cmdLine.force_stop_replication,
        cmdLine.network,
        cmdLine.vm_prefix)

def exec_prep(cmdLine, extra_args):
    """Run the 'prepare-permissions' subcommand via cmd_prep().

    extra_args carries the bare project names to grant the service account
    access to; anything option-like among them is rejected.
    """
    flag_like = next((name for name in extra_args if name.startswith("-")), None)
    if flag_like is not None:
        raise IllegalArgumentError(f"Unexpected parameter {flag_like} provided")
    cmd_prep(
        cmdLine.project,
        get_sa_email(project_id, cmdLine.sa_prefix),
        cmdLine.role,
        cmdLine.org_role,
        extra_args
    )

def exec_clean(cmdLine, extra_args):
    """Validate 'clean' subcommand arguments and invoke cmd_clean().

    Either --src-pri-zone or --src-region must be supplied; when both are
    given they must agree.

    Raises:
        IllegalArgumentError: on leftover options, missing/conflicting
            source location, or mutually exclusive flags.
    """
    if extra_args:
        raise IllegalArgumentError(
            f"Unexpected commandline options provided: {' '.join(extra_args)}")
    if not cmdLine.src_pri_zone and not cmdLine.src_region:
        raise IllegalArgumentError(
            "Neither source zone (--src-pri-zone), nor source region (--src-region) provided")

    # Derive the region from the zone once; reused for both the consistency
    # check and as the fallback when --src-region is absent.
    zone_region = None
    if cmdLine.src_pri_zone:
        zone_region = getRegion(cmdLine.src_pri_zone, cmdLine.src_pri_zone)

    if cmdLine.src_pri_zone and cmdLine.src_region:
        if cmdLine.src_region != zone_region:
            raise IllegalArgumentError(
                f"Source zone '{cmdLine.src_pri_zone}' does not match source region {cmdLine.src_region}")

    if cmdLine.keep_acd and cmdLine.force_delete:
        # (Was a stray f-string with no placeholders.)
        raise IllegalArgumentError("--force-delete and --keep-acd options are mutually exclusive.")

    # --src-region wins when given; otherwise the zone-derived region is
    # guaranteed non-None by the check above.
    src_region = cmdLine.src_region if cmdLine.src_region else zone_region

    cmd_clean(
        cmdLine.project,
        src_region,
        cmdLine.dest_region,
        cmdLine.vm_prefix,
        cmdLine.force_delete,
        cmdLine.keep_acd,
        cmdLine.dry_run
    )

def main():
    """Entry point: resolve the target project, then dispatch the subcommand."""
    # Both globals are consumed elsewhere: keep_after_fail by the top-level
    # exception handler, project_id by the exec_* command handlers.
    global keep_after_fail, project_id

    args, leftover = prepareCmdLineParser().parse_known_args()
    keep_after_fail = args.keep_after_fail

    try:
        project = find_project(args.project)
    except DefaultCredentialsError as e:
        raise GcloudAuthException(e)
    except ServiceUnavailable as e:
        raise WrapInvokerException(e)

    if not project:
        raise IllegalArgumentError(f"ERROR: Project '{args.project}' not found.")

    # Discard output, only check the session is live. Project was already found.
    runCommand(['gcloud', '-q', 'projects', 'describe', project.project_id])

    project_id = project.project_id

    # Dispatch to the exec_* handler bound by set_defaults() for the subcommand.
    args.func(args, leftover)

def prepareCmdLineParser():
    """Build the argparse parser: one subcommand per ARO operation.

    Each subcommand binds its handler via set_defaults(func=...), which
    main() dispatches through after resolving the project.
    """
    parser = argparse.ArgumentParser(description='AROVA CLI')

    sps = parser.add_subparsers(title='subcommands', metavar='', required=True)

    deploy = sps.add_parser("deploy", help="Deploys initial AROVA to the selected project")
    deploy.add_argument('--src-region', type=str, help='Source region')
    deploy.add_argument('--src-pri-zone', type=str, help='Primary source zone', required=True)
    deploy.add_argument('--src-sec-zone', type=str, help='Secondary source zone', required=True)
    deploy.add_argument('--dest-region', type=str, help='Destination region')
    deploy.add_argument('--dest-pri-zone', type=str, help='Primary destination zone', required=True)
    deploy.add_argument('--dest-sec-zone', type=str, help='Secondary destination zone', required=True)
    deploy.add_argument('--sa', type=str, help='Service account (e-mail or prefix)', required=True)
    deploy.add_argument('--subscription-id', type=str, help='Subscription ID', required=True)
    deploy.add_argument('--network', type=str, help='Name of network attached to AROVA VM')
    deploy.add_argument('--vm-prefix', type=str, help='Prefix for name of AROVA VM')
    deploy.add_argument("--project", type=str, help="Deploy destination project name", required=True)
    deploy.add_argument("--kms-location", type=str, help="Location of key ring")
    deploy.add_argument("--kms-key-ring", type=str, help="Name of key ring")
    deploy.add_argument("--kms-key", type=str, help="Name of key to encrypt/decrypt AROVA disk")
    # BooleanOptionalAction manages its own True/False value; passing
    # type=bool alongside it is misleading (bool('False') is True) and is
    # deprecated as of Python 3.12 — dropped here (and below).
    deploy.add_argument("--keep-after-fail", action=argparse.BooleanOptionalAction, help=argparse.SUPPRESS)
    deploy.set_defaults(func=exec_deploy, keep_after_fail=False)

    restore = sps.add_parser("recovery", help="Deploys restore AROVA to the selected project")
    restore.add_argument('--src-pri-zone', type=str, help='Primary source zone', required=True)
    restore.add_argument('--dest-region', type=str, help='Destination region')
    restore.add_argument('--src-region', type=str, help='Source region')
    restore.add_argument('--aro-disk-name', type=str, help='AROVA disk name')
    restore.add_argument('--force-stop-replication', '--force',
        action='store_true', help="Force cleanup, old resource deletion, and replication state override")
    restore.add_argument('--sa', type=str, help='Service account (e-mail or prefix)', required=True)
    restore.add_argument('--network', type=str, help='Name of network attached to AROVA VM')
    restore.add_argument('--vm-prefix', type=str, help='Prefix for name of AROVA VM')
    restore.add_argument("--project", type=str, help="Deploy destination project name", required=True)
    restore.add_argument("--keep-after-fail", action=argparse.BooleanOptionalAction, help=argparse.SUPPRESS)
    restore.set_defaults(func=exec_recovery, keep_after_fail=False, force_stop_replication=False)

    clean = sps.add_parser("clean", help="Cleans up AROVA for the selected region pair")
    clean.add_argument('--src-pri-zone', type=str, help='Primary source zone')
    # Help text was a copy-paste of the zone option's; corrected.
    clean.add_argument('--src-region', type=str, help='Source region')
    clean.add_argument('--dest-region', type=str, help='Destination region', required=True)
    clean.add_argument('--vm-prefix', type=str, help='Prefix for name of AROVA VM')
    clean.add_argument('-u', '--keep-acd', action='store_true', help='Keep ACD to upgrade AROVA VM')
    clean.add_argument("--project", type=str, help='Deploy destination project name', required=True)
    clean.add_argument('-f', '--force-delete', action='store_true', help='Force delete active replication database')
    clean.add_argument('-n', '--dry-run', action='store_true', help='Print GCP objects that will be processed during the clean procedure, no operation performed')
    clean.set_defaults(func=exec_clean, force_delete=False, keep_after_fail=False, keep_acd=False, dry_run=False)

    prep = sps.add_parser("prepare-permissions", help="Configures AROVA service account to access selected projects")
    prep.add_argument("--project", type=str, help="Deploy destination project name", required=True)
    prep.add_argument('--sa-prefix', type=str, help='Service account prefix', required=True)
    prep.add_argument('--role', type=str, help='Role name to be used for service account', default="arova_access")
    prep.add_argument('--org-role', help='Place role at the org level', action='store_true')
    prep.add_argument("--keep-after-fail", action=argparse.BooleanOptionalAction, help=argparse.SUPPRESS)
    prep.set_defaults(func=exec_prep, keep_after_fail=False)

    return parser

if __name__ == "__main__":
    exit_code = 0
    try:
        # Pre-flight: verify the script itself and the external CLI tools
        # it shells out to, then run the selected subcommand.
        selfcheck()
        check_required_utils()
        main()
    except ArovaCliExceptionBase as x:
        print(f"\n\tException encountered: {x}\n")
        writeCmdToLog("Exception during script execution", "", str(x))
        # NOTE(review): keep_after_fail is assigned inside main(); if
        # selfcheck() or check_required_utils() raises first, this lookup
        # would NameError unless a module-level default exists earlier in
        # the file — confirm.
        if not keep_after_fail and rollback_fns:
            print("Removing the created components.")
            # Undo partially created resources in reverse creation order.
            for msg, fn in reversed(rollback_fns):
                print(f"Cleanup: {msg}")
                try:
                    fn()
                except NotFound as e:
                    writeCmdToLog(msg, "Exception during cleanup", str(e))
                    print("\tAlready deleted.")
                except Exception as e:
                    # Best-effort cleanup: log the failure and continue with
                    # the remaining rollback entries.
                    writeCmdToLog(msg, "Exception during cleanup", str(e))
                    print("\tUnsuccessful. Please try deleting manually.")
        exit_code = 1
    if os.path.exists(AROVAVM_CONSTS.EXEC_LOG):
        print("Detailed AROVA execution {}logs saved to {}".format(
            "and exception " if exit_code else "", AROVAVM_CONSTS.EXEC_LOG))

    exit(exit_code)