initial layout for azure hosted ephemeral runners

pull/444/head
cloudymax 2022-09-28 09:32:03 +02:00
parent 579daa93a6
commit 94052dbc75
22 changed files with 2008 additions and 0 deletions

dist/platforms/azure/.gitignore vendored 100755

@ -0,0 +1,37 @@
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
# Ignore any .tfvars files that are generated automatically for each Terraform run. Most
# .tfvars files are managed as part of configuration and so should be included in
# version control.
#
# example.tfvars
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
scratch.md
.DS_Store
.blocked
.infracost
.terraform.lock.hcl
.vscode
.github/workflows/secrets

dist/platforms/azure/README.md vendored 100755

@ -0,0 +1,182 @@
# Azure Target
This Terraform project sets up a pre-defined number of ephemeral, isolated, and scalable VM pools on Azure that can be used as self-hosted runners or for other workloads. You will need an Azure account, a subscription, the Azure CLI, and access to a machine that can run Docker. Be aware that while this demo uses the minimum possible billable services, costs may still be incurred from usage, larger VM sizes, and public IP addresses. To further minimize costs, you can scale to 0 or destroy the entire infrastructure when it is not in use; a full rebuild takes only a few minutes.
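For example, to scale a pool down to zero between runs (resource names here are derived from the sample terraform.tfvars further down in this commit; substitute your own):
```bash
# scale the "blue" environment's pool down to zero instances
az vmss scale \
  --resource-group "scaleset-blue" \
  --name "blue-virtualmachine" \
  --new-capacity 0
```
Re-running `terraform apply` should restore the configured instance count.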
**What you get in each environment:**
- Virtual Machine Scale Set
- Network Security Groups + firewall rules
- Container Registry
- Key Vault
- S3 compatible Storage Account
- IAM/RBAC for service accounts and users
- Public IP addresses
- Pre-configured SSH credentials
- Cloud-init pre-provisioning
## Usage
1. Create an Azure account and subscription. Record your user object ID.
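A query like the one below can retrieve that object ID (on older CLI versions the field is `objectId` rather than `id`); it is what later goes into `admin_users`:
```bash
az ad signed-in-user show --query id --output tsv
```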
2. Run `az login` from the CLI
3. Create a Service Principal
```bash
SUBSCRIPTION=$(az account show --query id --output tsv)
SP_NAME="myserviceaccount"
az ad sp create-for-rbac --sdk-auth \
--display-name="${SP_NAME}" \
--role="Owner" \
--scopes="/subscriptions/$SUBSCRIPTION"
```
4. Save the output:
```json
{
"clientId": "",
"clientSecret": "",
"subscriptionId": "",
"tenantId": "",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/"
}
```
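These fields map onto the `ARM_*` environment variables used by the Docker commands below; one way to keep them at hand (values elided, fill them in from the JSON above):
```bash
export ARM_CLIENT_ID=""        # clientId
export ARM_CLIENT_SECRET=""    # clientSecret
export ARM_SUBSCRIPTION_ID=""  # subscriptionId
export ARM_TENANT_ID=""        # tenantId
```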
5. Create the Terraform state's resource group, storage account, and container
```bash
export SUBSCRIPTION=$(az account show --query id --output tsv)
export KIND="StorageV2"
export LOCATION="westeurope"
export RG_NAME="terraform-state"
export STORAGE_NAME="jumphoststate"
export STORAGE_SKU="Standard_RAGRS"
export CONTAINER_NAME="state"
az group create \
-l="${LOCATION}" \
-n="${RG_NAME}"
az storage account create \
--name "${STORAGE_NAME}" \
--resource-group "${RG_NAME}" \
--location "${LOCATION}" \
--sku "${STORAGE_SKU}" --kind "${KIND}"
az storage account encryption-scope create \
--account-name "${STORAGE_NAME}" \
--key-source "Microsoft.Storage" --name "tfencryption" \
--resource-group "${RG_NAME}" \
--subscription "${SUBSCRIPTION}"
az storage container create \
--name "${CONTAINER_NAME}" \
--account-name "${STORAGE_NAME}" \
--resource-group "${RG_NAME}" \
--default-encryption-scope "tfencryption" \
--prevent-encryption-scope-override "true" --auth-mode "login" \
--fail-on-exist \
--public-access "off"
```
6. Update the providers.tf file with the bucket data
7. Update main.tf with the subscription and tenant IDs
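For reference, the bucket data from step 5 corresponds to the `azurerm` backend block in providers.tf (reproduced from later in this commit):
```hcl
backend "azurerm" {
  resource_group_name  = "terraform-state"
  storage_account_name = "jumphoststate"
  container_name       = "state"
  key                  = "dev.terraform.tfstate"
}
```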
8. Run `terraform init`
```bash
docker pull hashicorp/terraform:latest && \
docker run --platform linux/amd64 -it \
-e ARM_CLIENT_ID='' \
-e ARM_CLIENT_SECRET='' \
-e ARM_SUBSCRIPTION_ID='' \
-e ARM_TENANT_ID='' \
-v $(pwd):/workspace \
-w /workspace \
hashicorp/terraform:latest init
```
9. Import the bucket and resource group into Terraform:
```bash
STATE_RG_ID=$(az group list \
--query "[?name=='$RG_NAME'].id" \
--output tsv)
STATE_BUCKET_ID=$(az storage account list \
--resource-group $RG_NAME \
--query "[*].id" \
--output tsv)
docker pull hashicorp/terraform:latest && \
docker run --platform linux/amd64 -it \
-e ARM_CLIENT_ID='' \
-e ARM_CLIENT_SECRET='' \
-e ARM_SUBSCRIPTION_ID='' \
-e ARM_TENANT_ID='' \
-v $(pwd):/workspace \
-w /workspace \
hashicorp/terraform:latest import azurerm_resource_group.state_rg $STATE_RG_ID
docker pull hashicorp/terraform:latest && \
docker run --platform linux/amd64 -it \
-e ARM_CLIENT_ID='' \
-e ARM_CLIENT_SECRET='' \
-e ARM_SUBSCRIPTION_ID='' \
-e ARM_TENANT_ID='' \
-v $(pwd):/workspace \
-w /workspace \
hashicorp/terraform:latest import azurerm_storage_account.state_bucket $STATE_BUCKET_ID
```
10. Run `terraform apply`
```bash
docker pull hashicorp/terraform:latest && \
docker run --platform linux/amd64 -it \
-e ARM_CLIENT_ID='' \
-e ARM_CLIENT_SECRET='' \
-e ARM_SUBSCRIPTION_ID='' \
-e ARM_TENANT_ID='' \
-v $(pwd):/workspace \
-w /workspace \
hashicorp/terraform:latest apply
```
11. Tear down the VM scale set
```bash
docker pull hashicorp/terraform:latest && \
docker run --platform linux/amd64 -it \
-e ARM_CLIENT_ID='' \
-e ARM_CLIENT_SECRET='' \
-e ARM_SUBSCRIPTION_ID='' \
-e ARM_TENANT_ID='' \
-v $(pwd):/workspace \
-w /workspace \
hashicorp/terraform:latest destroy --target=module.virtual-machine-scale-set -auto-approve
```
12. Tear down the base resources
```bash
docker run --platform linux/amd64 -it \
-e ARM_CLIENT_ID='' \
-e ARM_CLIENT_SECRET='' \
-e ARM_SUBSCRIPTION_ID='' \
-e ARM_TENANT_ID='' \
-v $(pwd):/workspace \
-w /workspace \
hashicorp/terraform:latest destroy --target=module.environment-base -auto-approve
```
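The targeted destroys above leave the manually created state resource group from step 5 in place. If you also want to remove it (this deletes the Terraform state), something like the following works:
```bash
az group delete --name "terraform-state" --yes
```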

dist/platforms/azure/firewall.sh vendored 100755

@ -0,0 +1,156 @@
#!/bin/bash
########################################################################################
# This script will add the IP address of the current machine
# to the firewalls (if they exist) of all environments
# You will need to ensure the IP address of the device you will
# be using to run terraform is added, or you won't be able to do anything.
#
# Run `firewall.sh add` from the terraform runner to add its IP to the firewalls.
# Run `firewall.sh remove` to delete the IP address.
#
# - Max
##########################################################################################
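# Examples:
#   ./firewall.sh add      # add this machine's public IP to every firewall
#   ./firewall.sh remove   # remove it again when finished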
get_values(){
log "-- 🏞️ Environment: medicalvr-$1 🏞️--"
log "🌎 Finding the public ip of the current machine..."
export IP_ADDRESS=$(curl https://api.ipify.org)
log "🪵 Finding the log storage container name..."
export STORAGE_NAME=$(az storage account list --resource-group "medicalvr-$1" --query "[?contains(name, '$1logsnbackups')].name" -o tsv)
log "🔐 Finding the key vault name..."
export KV_NAME=$(az keyvault list --resource-group "medicalvr-$1" --query "[*].name" --output tsv)
log "🔎 Finding ACR Name..."
export ACR_NAME=$(az acr list --resource-group "medicalvr-$1" --query "[*].name" --output tsv)
}
add_to_firewalls(){
get_values $1
log "📡 Addding address to log storage firewall..."
EXISTS=$(az storage account network-rule list --account-name $1logsnbackups --query "ipRules[*].ipAddressOrRange" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "0" ]; then
az storage account network-rule add \
--resource-group "medicalvr-$1" \
--account-name $STORAGE_NAME \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already present."
fi
log "📡 Addding address to KeyVault Firewall..."
EXISTS=$(az keyvault network-rule list --name $KV_NAME --query "ipRules[*].value" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "0" ]; then
az keyvault network-rule add \
--name $KV_NAME \
--resource-group "medicalvr-$1" \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already present."
fi
log "📡 Adding address to ACR firewall..."
EXISTS=$(az acr network-rule list -n $ACR_NAME --query "ipRules[*].ipAddressOrRange" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "0" ]; then
az acr network-rule add \
-n $ACR_NAME \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already present."
fi
}
remove_from_firewalls(){
get_values $1
log "🧹 Removing address from log storage firewall..."
EXISTS=$(az storage account network-rule list --account-name $1logsnbackups --query "ipRules[*].ipAddressOrRange" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "1" ]; then
az storage account network-rule remove \
--resource-group "medicalvr-$1" \
--account-name $STORAGE_NAME \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already absent."
fi
log "🧹 Removing address from to KeyVault Firewall..."
EXISTS=$(az keyvault network-rule list --name $KV_NAME --query "ipRules[*].value" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "1" ]; then
az keyvault network-rule remove \
--name $KV_NAME \
--resource-group "medicalvr-$1" \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already absent."
fi
log "🧹 Removing address from to ACR firewall..."
EXISTS=$(az acr network-rule list -n $ACR_NAME --query "ipRules[*].ipAddressOrRange" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "1" ]; then
az acr network-rule remove \
-n $ACR_NAME \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already absent."
fi
}
add_to_state(){
log "-- 🪣 State Bucket 🪣 --"
log "Adding address to state storage firewall..."
EXISTS=$(az storage account network-rule list --account-name "medicalvrterraformdata" --query "ipRules[*].ipAddressOrRange" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "0" ]; then
az storage account network-rule add \
--resource-group "terraform-iac" \
--account-name "medicalvrterraformdata" \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already present."
fi
}
remove_from_state(){
log "-- 🪣 State Bucket 🪣 --"
log "Removing address to state storage firewall..."
EXISTS=$(az storage account network-rule list --account-name "medicalvrterraformdata" --query "ipRules[*].ipAddressOrRange" --output tsv |grep -c "$IP_ADDRESS")
if [ "$EXISTS" -eq "1" ]; then
az storage account network-rule remove \
--resource-group "terraform-iac" \
--account-name "medicalvrterraformdata" \
--ip-address $IP_ADDRESS >> firewall.log
log " ➡️ done."
else
log " ➡️ address already absent."
fi
}
# Logging method
log() {
echo >&2 -e "[$(date +"%Y-%m-%d %H:%M:%S")] ${1-}"
}
add(){
add_to_firewalls development
add_to_firewalls production
add_to_state $IP_ADDRESS
log "Waiting..."
sleep 10
log "Done!"
}
remove(){
remove_from_firewalls development
remove_from_firewalls production
remove_from_state $IP_ADDRESS
}
"$@"

dist/platforms/azure/main.tf vendored 100755

@ -0,0 +1,125 @@
data "azurerm_client_config" "current" {
}
resource "azurerm_resource_group" "state_rg" {
name = "terraform-state"
location = "West Europe"
}
resource "azurerm_storage_account" "state_bucket" {
name = "jumphoststate"
account_tier = "Standard"
account_replication_type = var.account_replication_type
enable_https_traffic_only = true
min_tls_version = "TLS1_2"
resource_group_name = "terraform-state"
location = var.location
network_rules {
default_action = "Deny"
bypass = ["AzureServices", "Logging", "Metrics"]
ip_rules = var.allowed_ips
}
lifecycle {
#prevent_destroy = true
ignore_changes = [
network_rules
]
}
}
module "environment-base" {
source = "./modules/environment-base"
for_each = var.environment
# Project settings
environment = each.value
location = var.location
resource_group = "${var.resource_group}-${each.value}"
subscription_id = data.azurerm_client_config.current.subscription_id
tenant_id = data.azurerm_client_config.current.tenant_id
runner_object_id = data.azurerm_client_config.current.object_id
allowed_ips = var.allowed_ips
# Identities
admin_identity = "${each.value}-identity"
admin_users = var.admin_users
# Virtual Network
vnet_name = var.vnet_name
vnet_address_space = var.vnet_address_space
vnet_subnet_name = var.vnet_subnet_name
subnet_prefixes = ["10.0.0.0/8"]
# Container Registry
cr_name = var.cr_name
cr_sku = var.cr_sku[each.key]
public_network_access_enabled = var.public_network_access_enabled[each.key]
# Storage
storage_acct_name = var.storage_acct_name
account_tier = var.account_tier[each.key]
account_replication_type = var.account_replication_type
log_storage_tier = var.log_storage_tier
#KeyVault
kv_name = "${each.value}-${var.kv_name}"
kv_sku_ame = var.kv_sku_name[each.key]
}
module "virtual-machine-scale-set" {
source = "./modules/virtual-machine-scale-set"
for_each = var.environment
# Project settings
environment = each.value
location = var.location
resource_group = "${var.resource_group}-${each.value}"
allowed_ips = var.allowed_ips
# Virtual Network
vnet_name = var.vnet_name
vnet_subnet_name = var.vnet_subnet_name
subnet_prefixes = ["10.0.0.0/27"]
network_security_group = module.environment-base[each.key].network_security_group
# KeyVault
kv_name = "${each.value}-${var.kv_name}"
kv_id = module.environment-base[each.key].kv_id
kv_key_name = var.kv_key_name
kv_key_type = var.kv_key_type
kv_key_size = var.kv_key_size
# Virtual Machine Network Interface
vm_net_iface_name = var.vm_net_iface_name
vm_net_iface_ipconfig_name = var.vm_net_iface_ipconfig_name
vm_net_iface_private_ip_address_allocation = var.vm_net_iface_private_ip_address_allocation
# Virtual Machine
vm_name = var.vm_name
vm_computer_name = var.vm_computer_name
vm_size = var.vm_size[each.key]
vm_admin_username = var.vm_admin_username
admin_users = ["${module.environment-base[each.key].managed_identity_id}"]
# Virtual Machine Disk
vm_os_disk_caching = var.vm_os_disk_caching
vm_os_disk_size_gb = var.vm_os_disk_size_gb[each.key]
vm_storage_account_type = var.vm_storage_account_type
vm_source_image_publisher = var.vm_source_image_publisher
vm_source_image_offer = var.vm_source_image_offer
vm_source_image_sku = var.vm_source_image_sku
vm_source_image_verson = var.vm_source_image_verson
# Logs
storage_account_url = module.environment-base[each.key].storage_account.primary_blob_endpoint
depends_on = [
module.environment-base
]
}


@ -0,0 +1,122 @@
# Base Environment
This module will create the basic building-blocks for a deployment environment (Dev/Test/Prod).
## Resource Group
- A resource group to hold all the resources for this environment
## Accounts and Identities
- A Managed Identity owned by the Terraform runner with a randomly generated name
- An Azure application owned by the Terraform runner
- An Azure service principal assigned to the application and owned by the Terraform runner
## Container Registry
- A Container Registry with a randomized name assigned to the managed identity
- A container registry webhook (currently created but unused)
## Keys and Secrets
- An Azure Key Vault with a random name
- An Azure Key Vault access policy for the Terraform runner and the managed identity
## Storage
- An Azure Storage Account
- An azure blob container
- Azure SAS urls (move to app service module)
- A rotating time resource for certificate expiration
## Networking
- A top-level virtual network
- A network security group
- Inbound and Outbound security rules
## Usage
```hcl
module "environment-base" {
source = "./environment-base"
for_each = var.environment
# Project settings
environment = each.value
location = var.location
resource_group = "${var.resource_group}-${each.value}"
subscription_id = data.azurerm_client_config.current.subscription_id
tenant_id = data.azurerm_client_config.current.tenant_id
runner_object_id = data.azurerm_client_config.current.object_id
# Identities
admin_identity = "${each.value}-identity"
# Virtual Network
vnet_name = var.vnet_name
vnet_address_space = var.vnet_address_space
vnet_subnet_name = var.vnet_subnet_name
subnet_prefixes = ["10.0.1.0/16"]
# Container Registry
cr_name = var.cr_name
cr_sku = var.cr_sku[each.key]
# Storage
storage_acct_name = var.storage_acct_name
account_tier = var.account_tier[each.key]
account_replication_type = var.account_replication_type
log_storage_tier = var.log_storage_tier
#KeyVault
kv_name = "${each.value}-${var.kv_name}"
kv_sku_ame = var.kv_sku_name[each.key]
}
```
## Outputs
```hcl
output "kv_id" {
value = azurerm_key_vault.key_vault.id
}
output "vnet_id" {
value = azurerm_virtual_network.virtual_network.id
}
output "vnet_name" {
value = azurerm_virtual_network.virtual_network.name
}
output "managed_identity" {
value = azurerm_user_assigned_identity.admin_identity
}
output "managed_identity_name" {
value = azurerm_user_assigned_identity.admin_identity.name
}
output "managed_identity_client_id" {
value = azurerm_user_assigned_identity.admin_identity.client_id
}
output "managed_identity_id" {
value = azurerm_user_assigned_identity.admin_identity.id
}
output "storage_account" {
value = azurerm_storage_account.storage_account
}
output "log_contaier" {
value = azurerm_storage_container.log_container
}
output "log_contaier_id" {
value = azurerm_storage_container.log_container.id
}
output "log_contaier_sas" {
value = data.azurerm_storage_account_blob_container_sas.website_logs_container_sas.sas
}
output "conatiner_registry" {
value = azurerm_container_registry.container_registry
}
output "network_security_group" {
value = azurerm_network_security_group.netsec_group
}
```


@ -0,0 +1,62 @@
resource "random_string" "random" {
length = 8
special = false
override_special = "!#$%&*()-_=+[]{}<>:?"
}
resource "azurerm_container_registry" "container_registry" {
name = "${var.environment}CR${random_string.random.result}"
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
sku = var.cr_sku
public_network_access_enabled = var.public_network_access_enabled
admin_enabled = true
identity {
type = "UserAssigned"
identity_ids = [
azurerm_user_assigned_identity.admin_identity.id
]
}
depends_on = [
random_string.random
]
}
resource "azurerm_container_registry_webhook" "webhook" {
name = "mywebhook"
resource_group_name = azurerm_resource_group.resource_group.name
registry_name = azurerm_container_registry.container_registry.name
location = azurerm_resource_group.resource_group.location
# Specifies the service URI for the Webhook to post notifications.
service_uri = "https://mywebhookreceiver.example/mytag"
status = "enabled"
# Specifies the scope of repositories that can trigger an event. For example, foo:* means events for all tags under repository foo. foo:bar means events for 'foo:bar' only. foo is equivalent to foo:latest. Empty means all events
scope = ""
# A list of actions that trigger the Webhook to post notifications. At least one action needs to be specified. Valid values are: push, delete, quarantine, chart_push, chart_delete
actions = ["push"]
}
resource "azurerm_role_assignment" "pull" {
count = length(local.all_users)
scope = "/subscriptions/${var.subscription_id}/resourceGroups/${azurerm_resource_group.resource_group.name}"
role_definition_name = "AcrPull"
principal_id = local.all_users[count.index]
}
resource "azurerm_role_assignment" "push" {
count = length(local.all_users)
scope = "/subscriptions/${var.subscription_id}/resourceGroups/${azurerm_resource_group.resource_group.name}"
role_definition_name = "AcrPush"
principal_id = local.all_users[count.index]
}


@ -0,0 +1,24 @@
####################
# AzureRM Identity #
####################
# get the credentials of the terraform user
data "azuread_client_config" "current" {}
# generate a random identity name
resource "random_pet" "identity" {
length = 2
separator = "x"
}
# create the user-assigned managed identity
resource "azurerm_user_assigned_identity" "admin_identity" {
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
name = random_pet.identity.id
depends_on = [
random_pet.identity
]
}


@ -0,0 +1,69 @@
##############################
# Azure Keyvault and Secrets #
##############################
# make up a name for the keyvault
resource "random_pet" "key_vault" {
length = 2
separator = "x"
}
# create the keyvault
resource "azurerm_key_vault" "key_vault" {
name = random_pet.key_vault.id
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
tenant_id = azurerm_user_assigned_identity.admin_identity.tenant_id
sku_name = var.kv_sku_ame
soft_delete_retention_days = 7
purge_protection_enabled = false
network_acls {
default_action = "Deny"
bypass = "AzureServices"
ip_rules = var.allowed_ips
}
depends_on = [
random_pet.key_vault
]
lifecycle {
ignore_changes = [
network_acls
]
}
}
# squash our list of users and list of generated ids into a single list
locals {
generated_users = tolist(["${azurerm_user_assigned_identity.admin_identity.principal_id}", "${var.runner_object_id}"])
all_users = concat(var.admin_users, local.generated_users)
}
# grant permissions to all in the list so we can access the vault we just created
resource "azurerm_key_vault_access_policy" "admins" {
count = length(local.all_users)
key_vault_id = azurerm_key_vault.key_vault.id
tenant_id = var.tenant_id
object_id = local.all_users[count.index]
certificate_permissions = [
"Backup", "Create", "Delete", "DeleteIssuers", "Get", "GetIssuers", "Import", "List", "ListIssuers", "ManageContacts", "ManageIssuers", "Purge", "Recover", "Restore", "SetIssuers", "Update"
]
key_permissions = [
"Get", "Backup", "Create", "Delete", "Decrypt", "Encrypt", "List", "Import", "Purge", "Recover", "Restore", "Sign", "Update", "Verify"
]
secret_permissions = [
"Get", "Delete", "Backup", "List", "Set", "Purge", "Restore", "Recover"
]
depends_on = [
azurerm_key_vault.key_vault, azurerm_user_assigned_identity.admin_identity
]
}


@ -0,0 +1,38 @@
#########################
# Azure Virtual Network #
#########################
# Top level virtual network
resource "azurerm_virtual_network" "virtual_network" {
name = "${var.environment}-${var.vnet_name}"
address_space = var.vnet_address_space
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# Security group for the network that will hold our rules
resource "azurerm_network_security_group" "netsec_group" {
name = "netsec"
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
tags = {
environment = var.environment
}
}
# Network Security rule to allow ssh from approved IPs
resource "azurerm_network_security_rule" "ssh" {
name = "inboundSSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefixes = var.allowed_ips
destination_address_prefix = "*"
resource_group_name = azurerm_resource_group.resource_group.name
network_security_group_name = azurerm_network_security_group.netsec_group.name
}


@ -0,0 +1,53 @@
output "kv_id" {
value = azurerm_key_vault.key_vault.id
}
output "vnet_id" {
value = azurerm_virtual_network.virtual_network.id
}
output "vnet_name" {
value = azurerm_virtual_network.virtual_network.name
}
output "managed_identity" {
value = azurerm_user_assigned_identity.admin_identity
}
output "managed_identity_name" {
value = azurerm_user_assigned_identity.admin_identity.name
}
output "managed_identity_client_id" {
value = azurerm_user_assigned_identity.admin_identity.client_id
}
output "managed_identity_id" {
value = azurerm_user_assigned_identity.admin_identity.id
}
output "storage_account" {
value = {
name = "${azurerm_storage_account.storage_account.name}"
id = "${azurerm_storage_account.storage_account.id}"
primary_access_key = "${azurerm_storage_account.storage_account.primary_access_key}"
primary_blob_endpoint = "${azurerm_storage_account.storage_account.primary_blob_endpoint}"
primary_connection_string = "${azurerm_storage_account.storage_account.primary_connection_string}"
primary_blob_connection_string = "${azurerm_storage_account.storage_account.primary_blob_connection_string}"
}
}
output "log_contaier" {
value = azurerm_storage_container.log_container
}
output "log_contaier_id" {
value = azurerm_storage_container.log_container.id
}
output "conatiner_registry" {
value = azurerm_container_registry.container_registry
}
output "network_security_group" {
value = azurerm_network_security_group.netsec_group
}
output "container_registry_admin_username" {
value = azurerm_container_registry.container_registry.admin_username
}
output "container_registry_admin_password" {
value = azurerm_container_registry.container_registry.admin_password
sensitive = true
}
output "container_registry_server_url" {
value = azurerm_container_registry.container_registry.login_server
}


@ -0,0 +1,8 @@
###################
# Resource Group #
###################
resource "azurerm_resource_group" "resource_group" {
name = var.resource_group
location = var.location
}


@ -0,0 +1,53 @@
# key name
resource "random_pet" "vault_encryption" {
length = 2
separator = "x"
}
# Storage account with a container for logs and backups.
resource "azurerm_storage_account" "storage_account" {
name = "${var.environment}${var.storage_acct_name}"
resource_group_name = azurerm_resource_group.resource_group.name
location = azurerm_resource_group.resource_group.location
account_tier = var.account_tier
account_replication_type = var.account_replication_type
enable_https_traffic_only = true
min_tls_version = "TLS1_2"
network_rules {
default_action = "Deny"
bypass = ["AzureServices", "Logging", "Metrics"]
ip_rules = var.allowed_ips
}
identity {
type = "UserAssigned"
identity_ids = [
azurerm_user_assigned_identity.admin_identity.id
]
}
lifecycle {
#prevent_destroy = true
ignore_changes = [
network_rules
]
}
}
# container for logs
resource "azurerm_storage_container" "log_container" {
name = "jumphostlogs"
storage_account_name = azurerm_storage_account.storage_account.name
container_access_type = "private"
depends_on = [
azurerm_storage_account.storage_account
]
#lifecycle {
# prevent_destroy = true
#}
}


@ -0,0 +1,117 @@
variable "admin_identity" {
description = "Managed Identity created on deployment who will control the app services"
type = string
}
variable "environment" {
description = "deployment environment - dev/staging/prod"
type = string
default = "dev"
}
variable "resource_group" {
description = "the azure resource group that will hold our stuff"
type = string
}
variable "location" {
description = "geo region where our items will be created"
type = string
default = "West Europe"
}
variable "vnet_name" {
description = "name of the outer-most virtual network boundary"
type = string
}
variable "vnet_address_space" {
description = "address space for the outer vnet"
type = list(any)
default = ["10.0.0.0/16"]
}
variable "allowed_ips" {
description = "addresses allowed to access the infra"
type = list(string)
}
variable "vnet_subnet_name" {
description = "internal subnet name"
type = string
}
variable "subnet_prefixes" {
description = "internal subnet prefixes"
type = list(any)
}
variable "cr_name" {
description = "Name for the container registry"
type = string
}
variable "cr_sku" {
description = "SKU for the container registry: Basic, Standard and Premium."
type = string
}
variable "kv_name" {
description = "Name for the keyvault"
type = string
}
variable "kv_sku_ame" {
description = "SKU of the keyvault service: standard and premium"
type = string
}
variable "storage_acct_name" {
description = "Storage account name for the account that will hold out logs/backups"
type = string
}
variable "account_tier" {
description = "logging storage account tier: Defines the Tier to use for this storage account. Valid options are Standard and Premium. For BlockBlobStorage and FileStorage accounts only Premium is valid. Changing this forces a new resource to be created."
type = string
}
variable "account_replication_type" {
description = " Defines the type of replication to use for this storage account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. Changing this forces a new resource to be created when types LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice versa."
type = string
}
variable "log_storage_tier" {
description = "Defines the access tier for BlobStorage, FileStorage and StorageV2 accounts. Valid options are Hot and Cool, defaults to Hot"
type = string
}
variable "tenant_id" {
description = "value"
type = string
}
variable "subscription_id" {
description = "value"
type = string
}
variable "runner_object_id" {
description = "value"
type = string
}
variable "log_retention_in_days" {
description = "The time in days after which to remove blobs. A value of 0 means no retention."
type = number
default = 7
}
variable "public_network_access_enabled" {
description = "decide if the contaier registry will be locked dow or not, only available on premium tier"
}
variable "admin_users" {
description = "object_id's for users /groups that will get admin access to things"
type = list(string)
}


@ -0,0 +1,72 @@
# Azure Virtual Machines
This module will create a Virtual Machine that integrates with the `environment-base` module.
Creates:
- Subnet on the main virtual network
- Public IP address
- Virtual Network Interface
- Virtual Disk
- Public and Private RSA Key Pair inside the main key-vault
- Linux Virtual Machine
- Bastion login device
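After an apply, the public IPs created for a pool can be listed and used for SSH, for example (resource names derived from the sample terraform.tfvars; adjust to your values):
```bash
# list public IP addresses in the "blue" environment's resource group
az network public-ip list \
  --resource-group "scaleset-blue" \
  --query "[].ipAddress" \
  --output table

# connect as the admin user configured in terraform.tfvars
ssh cloudymax@<public-ip>
```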
## Usage
```hcl
module "virtual-machine" {
source = "./modules/virtual-machine"
for_each = var.environment
# Project settings
environment = each.value
location = var.location
resource_group = "${var.resource_group}-${each.value}"
secret_rotation_days = var.secret_rotation_days
# Virtual Network
vnet_name = var.vnet_name
vnet_subnet_name = var.vnet_subnet_name
subnet_prefixes = ["10.0.0.0/27"]
network_security_group = module.environment-base[each.key].network_security_group
# KeyVault
kv_name = "${each.value}-${var.kv_name}"
kv_id = module.environment-base[each.key].kv_id
kv_key_name = var.kv_key_name
kv_key_type = var.kv_key_type
kv_key_size = var.kv_key_size
# Virtual Machine Network Interface
vm_net_iface_name = var.vm_net_iface_name
vm_net_iface_ipconfig_name = var.vm_net_iface_ipconfig_name
vm_net_iface_private_ip_address_allocation = var.vm_net_iface_private_ip_address_allocation
# Virtual Machine
vm_name = var.vm_name
vm_computer_name = var.vm_computer_name
vm_size = var.vm_size[each.key]
vm_admin_username = var.vm_admin_username
admin_users = ["${module.environment-base[each.key].managed_identity_id}"]
# Virtual Machine Disk
vm_os_disk_caching = var.vm_os_disk_caching
vm_os_disk_size_gb = var.vm_os_disk_size_gb[each.key]
vm_storage_account_type = var.vm_storage_account_type
vm_source_image_publisher = var.vm_source_image_publisher
vm_source_image_offer = var.vm_source_image_offer
vm_source_image_sku = var.vm_source_image_sku
vm_source_image_verson = var.vm_source_image_verson
# Logs
storage_account_url = module.environment-base[each.key].storage_account.primary_blob_endpoint
depends_on = [
module.environment-base
]
}
```


@ -0,0 +1,33 @@
#cloud-config
users:
- name: vmadmin
gecos: VM administrator
groups: users, admin, docker, sudo
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
lock_passwd: false
passwd: "$6$rounds=4096$saltsaltlettuce$Lp/FV.2oOgew7GbM6Nr8KMGMBn7iFM0x9ZwLqtx9Y4QJmKvfcnS.2zx4MKmymCPQGpHS7gqYOiqWjvdCIV2uN."
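# the hash above is a SHA-512 crypt hash with 4096 rounds; a replacement can
# typically be generated with `mkpasswd --method=sha-512 --rounds=4096` (from the whois package installed below)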
apt:
primary:
- arches: [default]
uri: http://us.archive.ubuntu.com/ubuntu/
sources:
kubectl.list:
source: deb [arch=amd64] https://apt.kubernetes.io/ kubernetes-xenial main
keyid: 59FE0256827269DC81578F928B57C5C2836F4BEB
helm.list:
source: deb https://baltocdn.com/helm/stable/debian/ all main
keyid: 81BF832E2F19CD2AA0471959294AC4827C1A168A
package_update: true
package_upgrade: true
packages:
- kubectl
- wget
- helm
- htop
- docker.io
- build-essential
- python3-pip
- procps
- file
- whois


@ -0,0 +1,33 @@
##############################
# Virtual Machine Networking #
##############################
# Subnet for the VM
resource "azurerm_subnet" "vm_subnet" {
name = "vm${var.vnet_subnet_name}"
resource_group_name = var.resource_group
virtual_network_name = "${var.environment}-${var.vnet_name}"
address_prefixes = var.subnet_prefixes
}
# Create Network Security Group
resource "azurerm_network_security_group" "vm_security_group" {
name = "VmNetworkSecurityGroup"
location = var.location
resource_group_name = var.resource_group
}
# Creates a firewall rule on the security group to allow SSH
resource "azurerm_network_security_rule" "example" {
name = "SSH"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefixes = var.allowed_ips
destination_address_prefix = "*"
resource_group_name = var.resource_group
network_security_group_name = azurerm_network_security_group.vm_security_group.name
}


@ -0,0 +1,147 @@
variable "environment" {
description = "deployment environment - dev/staging/prod"
type = string
default = "dev"
}
variable "resource_group" {
description = "the azure resource group that will hold our stuff"
type = string
}
variable "location" {
description = "geo region where our items will be created"
type = string
default = "West Europe"
}
variable "subnet_prefixes" {
description = "internal subnet prefixes"
type = list(any)
}
variable "vm_net_iface_name" {
description = "The name of the Network Interface. Changing this forces a new resource to be created."
type = string
}
variable "vm_net_iface_ipconfig_name" {
description = "A name used for this IP Configuration."
type = string
}
variable "vm_net_iface_private_ip_address_allocation" {
description = "The allocation method used for the Private IP Address. Possible values are Dynamic and Static"
type = string
}
variable "vm_name" {
description = "The name of the Linux Virtual Machine. Changing this forces a new resource to be created."
type = string
}
variable "vm_computer_name" {
description = "The hostname of the Linux Virtual Machine. Changing this forces a new resource to be created."
type = string
}
variable "vm_size" {
description = "The SKU which should be used for this Virtual Machine, such as Standard_F2."
type = string
}
variable "vm_admin_username" {
description = "The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created."
type = string
}
variable "vm_os_disk_caching" {
description = "The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite."
type = string
}
variable "vm_os_disk_size_gb" {
description = "The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from."
}
variable "vm_storage_account_type" {
description = "The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created."
type = string
}
variable "storage_account_url" {
description = "where to send logs"
type = string
}
variable "vm_source_image_publisher" {
description = "Specifies the publisher of the image used to create the virtual machines."
type = string
}
variable "vm_source_image_offer" {
description = "Specifies the offer of the image used to create the virtual machines. az vm image list --all --publisher Canonical --offer 0001-com-ubuntu-server-jammy-daily --output table"
type = string
}
variable "vm_source_image_sku" {
description = "Specifies the SKU of the image used to create the virtual machines."
type = string
}
variable "vm_source_image_verson" {
description = "Specifies the version of the image used to create the virtual machines."
type = string
}
variable "kv_id" {
description = "ID for the keyvault"
type = string
}
variable "kv_name" {
description = "Name for the keyvault"
type = string
}
variable "kv_key_name" {
description = "Name for the ssh key"
type = string
}
variable "kv_key_type" {
description = "type of key to create: Possible values are EC (Elliptic Curve), EC-HSM, Oct (Octet), RSA and RSA-HSM"
type = string
}
variable "kv_key_size" {
description = "Specifies the Size of the RSA key to create in bytes. For example, 1024 or 2048. Note: This field is required if key_type is RSA or RSA-HSM"
type = number
}
variable "vnet_name" {
description = "name of the outer-most virtual network boundary"
type = string
}
variable "vnet_subnet_name" {
description = "internal subnet name"
type = string
}
variable "network_security_group" {
description = "security group to attach to"
type = any
}
variable "admin_users" {
description = "object_id's for users /groups that will get admin access to things"
type = list(string)
}
variable "allowed_ips" {
description = "addresses allowed to access the infra"
type = list(string)
}


@ -0,0 +1,112 @@
##################################
# Azure Virtual Machine Scale Set#
##################################
# Cloud-init file that will provision each machine as it boots
data "template_file" "cloudconfig" {
template = "${file("${path.module}/cloud-init.txt")}"
}
# formatting the cloud-init file for azure
data "template_cloudinit_config" "config" {
gzip = true
base64_encode = true
part {
content_type = "text/cloud-config"
content = "${data.template_file.cloudconfig.rendered}"
}
}
# create a password for the virtual machine
resource "random_password" "vm_admin_password" {
length = 16
special = false
}
# add the password to the keyvault
resource "azurerm_key_vault_secret" "vm_admin_password" {
name = "${var.environment}vmadmin"
value = "${random_password.vm_admin_password.result}"
content_type = "text/plain"
key_vault_id = var.kv_id
}
# create the virtual machine scale set
resource "azurerm_linux_virtual_machine_scale_set" "virtual_machine" {
name = "${var.environment}-${var.vm_name}"
resource_group_name = var.resource_group
location = var.location
sku = var.vm_size
instances = 1
admin_username = var.vm_admin_username
admin_password = random_password.vm_admin_password.result
#allow_extension_operations = false
disable_password_authentication = false
computer_name_prefix = var.vm_computer_name
# this is the cloud-init data
custom_data = "${data.template_cloudinit_config.config.rendered}"
network_interface {
name = var.vm_net_iface_name
enable_accelerated_networking = false
enable_ip_forwarding = true
network_security_group_id = azurerm_network_security_group.vm_security_group.id
primary = true
ip_configuration {
name = var.vm_net_iface_ipconfig_name
primary = true
subnet_id = azurerm_subnet.vm_subnet.id
public_ip_address {
name = "vmpip"
}
}
}
os_disk {
caching = var.vm_os_disk_caching
storage_account_type = var.vm_storage_account_type
disk_size_gb = var.vm_os_disk_size_gb
write_accelerator_enabled = false
}
data_disk {
caching = "ReadWrite"
create_option = "Empty"
disk_size_gb = "32"
lun = "1"
storage_account_type = "Standard_LRS"
write_accelerator_enabled = false
}
source_image_reference {
publisher = var.vm_source_image_publisher
offer = var.vm_source_image_offer
sku = var.vm_source_image_sku
version = var.vm_source_image_verson
}
timeouts {
create = "6m"
update = "6m"
delete = "6m"
}
boot_diagnostics {
storage_account_uri = var.storage_account_url
}
identity {
type = "UserAssigned"
identity_ids = var.admin_users
}
depends_on = [
data.template_cloudinit_config.config
]
}


@ -0,0 +1,69 @@
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>3.14.0"
}
azuread = {
source = "hashicorp/azuread"
version = "~>2.26.1"
}
random = {
source = "hashicorp/random"
version = "~>3.3.1"
}
tls = {
source = "hashicorp/tls"
version = "~>3.4.0"
}
time = {
source = "hashicorp/time"
version = "~>0.7.2"
}
cloudinit = {
source = "hashicorp/cloudinit"
version = "~>2.2.0"
}
}
backend "azurerm" {
resource_group_name = "terraform-state"
storage_account_name = "jumphoststate"
container_name = "state"
key = "dev.terraform.tfstate"
}
}
provider "tls" {
}
provider "azurerm" {
features {
resource_group {
prevent_deletion_if_contains_resources = false
}
key_vault {
purge_soft_delete_on_destroy = true
recover_soft_deleted_key_vaults = false
}
virtual_machine {
delete_os_disk_on_deletion = true
graceful_shutdown = false
skip_shutdown_and_force_delete = false
}
}
}
provider "azuread" {
}
provider "random" {
}
provider "time" {
}
provider "cloudinit" {
}


@ -0,0 +1,150 @@
name: Deploy infra via Terraform
on:
pull_request:
branches: [ "main", "testing", "develop" ]
push:
branches: [ "main" ]
jobs:
tf_fmt:
env:
ARM_CLIENT_ID: ${{ secrets.CLIENTID }}
ARM_CLIENT_SECRET: ${{ secrets.CLIENTSECRET }}
ARM_SUBSCRIPTION_ID: ${{ secrets.SUBSCRIPTIONID }}
ARM_TENANT_ID: ${{ secrets.TENANTID }}
TF_ROOT: "."
name: Terraform Deployment
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v1
- name: Azure Login
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Azure CLI script file
uses: azure/CLI@v1
with:
azcliversion: latest
inlineScript: |
chmod +x $GITHUB_WORKSPACE/firewall.sh
bash $GITHUB_WORKSPACE/firewall.sh add
- name: Logout
run: az logout
- name: Terraform actions
uses: hashicorp/setup-terraform@v2
- name: Terraform fmt
id: fmt
run: terraform fmt -check
continue-on-error: false
- name: Terraform Init
id: init
run: terraform init
- name: Terraform Validate
id: validate
run: terraform validate -no-color
- name: Terraform Plan and Save
id: plan
run: terraform plan -no-color
continue-on-error: true
- uses: actions/github-script@v6
if: github.event_name == 'pull_request'
env:
PLAN: "terraform\n${{ steps.plan.outputs.stdout }}"
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
// 1. Retrieve existing bot comments for the PR
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
})
const botComment = comments.find(comment => {
return comment.user.type === 'Bot' && comment.body.includes('Terraform Format and Style')
})
// 2. Prepare format of the comment
const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`
#### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\`
#### Terraform Validation 🤖\`${{ steps.validate.outcome }}\`
<details><summary>Validation Output</summary>
\`\`\`\n
${{ steps.validate.outputs.stdout }}
\`\`\`
</details>
#### Terraform Plan 📖\`${{ steps.plan.outcome }}\`
<details><summary>Show Plan</summary>
\`\`\`\n
${process.env.PLAN}
\`\`\`
</details>
*Pusher: @${{ github.actor }}, Action: \`${{ github.event_name }}\`, Working Directory: \`${{ env.TF_ROOT }}\`, Workflow: \`${{ github.workflow }}\`*`;
// 3. If we have a comment, update it, otherwise create a new one
if (botComment) {
github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: output
})
} else {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
})
}
- name: Terraform Plan Status
if: steps.plan.outcome == 'failure'
run: exit 1
- name: Terraform Apply
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
run: terraform apply -auto-approve -input=false
- name: Azure Login
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Azure CLI script file
uses: azure/CLI@v1
with:
azcliversion: latest
inlineScript: |
chmod +x $GITHUB_WORKSPACE/firewall.sh
bash $GITHUB_WORKSPACE/firewall.sh remove
- name: Fail gracefully
if: ${{ failure() }}
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Fail gracefully
if: ${{ failure() }}
uses: azure/CLI@v1
with:
azcliversion: latest
inlineScript: |
chmod +x $GITHUB_WORKSPACE/firewall.sh
bash $GITHUB_WORKSPACE/firewall.sh remove


@ -0,0 +1,127 @@
################################
# Location and Project settings
################################
resource_group = "scaleset"
location = "West Europe"
subscription_id = "d520e0d1-8ce2-4bf3-bb06-443ee372cfec"
tenant_id = "883785d3-d65d-4088-aca9-4deeb6cf92dc"
allowed_ips = ["178.85.156.146"]
admin_users = ["4b036f16-1f92-49be-a31d-3e2569b5e7fa"]
################################
# Environments to create/manage
################################
environment = {
0 : "blue",
1 : "green"
}
################################
# resource sizing
################################
account_tier = {
# Defines the Tier to use for this storage account.
# Valid options are Standard and Premium. For BlockBlobStorage and FileStorage accounts only Premium is valid
0 : "Standard",
1 : "Standard",
}
cr_sku = {
# SKU for the container registry: Basic, Standard and Premium.
# Disabling public network access is not supported for the SKU Basic or Standard.
0 : "Standard",
1 : "Premium"
}
public_network_access_enabled = {
0 : true,
1 : true
}
kv_sku_name = {
# SKU of the keyvault service: standard and premium
0 : "standard",
1 : "standard"
}
vm_size = {
# https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-b-series-burstable
0 : "Standard_B1s",
1 : "Standard_B1s"
}
vm_os_disk_size_gb = {
# VM disk size in GB
0 : "30",
1 : "30"
}
vm_data_disk_size_gb = {
# VM disk size in GB
0 : "30",
1 : "30"
}
write_accelerator_enabled = {
0: false,
1: false
}
accelerated_networking = {
0: false,
1: false
}
###########################
# Virtual Network settings
############################
vnet_name = "virtual-cage"
vnet_address_space = ["10.0.0.0/8"]
vnet_subnet_name = "internal-subnet"
###########################
# Container Registry Name
###########################
cr_name = "cloudyCR"
###########################
# KeyVault and Key Settings
###########################
kv_name = "cloudyKV"
kv_key_name = "generated-key"
kv_key_type = "RSA"
kv_key_size = 2048
###########################
# Virtual Machine Settings
###########################
vm_name = "virtualmachine"
vm_computer_name = "cloudy-vm"
# changing this name will also require updating the CI pipelines
vm_admin_username = "cloudymax"
# Network iterface
vm_net_iface_name = "vm-nic"
vm_net_iface_ipconfig_name = "vm-nic-config"
vm_net_iface_private_ip_address_allocation = "Dynamic"
# Virtual Machine Disk
# az vm image list --all --publisher Canonical --sku "22_04-daily-lts-gen2" --query "[*].{version:version,architecture:architecture,sku:sku,offer:offer,version:version}" --output table
# choose the latest version
vm_os_disk_caching = "ReadWrite"
vm_storage_account_type = "Standard_LRS"
vm_source_image_publisher = "Canonical"
vm_source_image_offer = "0001-com-ubuntu-server-jammy-daily"
vm_source_image_sku = "22_04-daily-lts-gen2"
vm_source_image_verson = "22.04.202209270"
###########################
# Log Settings
###########################
detailed_error_messages = true
failed_request_tracing = true
log_level = "Information"
log_retention_in_days = 30
sas_url = ""
###########################
# Log storage options
###########################
logs_enabled = true
storage_acct_name = "xlogxbucketx"
account_replication_type = "LRS"
log_storage_tier = "Hot"

dist/platforms/azure/variables.tf vendored 100755

@ -0,0 +1,219 @@
variable "environment" {
description = "deployment environment - dev/staging/prod"
type = map(any)
}
variable "allowed_ips" {
description = "ip addresses allowed to access the infra"
type = list(string)
}
variable "resource_group" {
description = "the azure resource group that will hold our stuff"
type = string
}
variable "location" {
description = "geo region where our items will be created"
type = string
default = "West Europe"
}
variable "minimum_tls_version" {
description = "The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2."
type = number
default = 1.2
}
variable "detailed_error_messages" {
description = "Should detailed error messages be enabled."
type = string
}
variable "failed_request_tracing" {
description = "Should failed request tracing be enabled."
type = string
}
variable "log_level" {
description = "Log level. Possible values include: Verbose, Information, Warning, and Error."
type = string
default = "Warning"
}
variable "log_retention_in_days" {
description = "The time in days after which to remove blobs. A value of 0 means no retention."
type = number
default = 7
}
variable "sas_url" {
description = "SAS url to an Azure blob container with read/write/list/delete permissions."
type = string
}
variable "vnet_name" {
description = "name of the outer-most virtual network boundary"
type = string
}
variable "vnet_address_space" {
description = "address space for the outer vnet"
type = list(any)
default = ["10.0.0.0/16"]
}
variable "vnet_subnet_name" {
description = "internal subnet name"
type = string
}
variable "vm_net_iface_name" {
description = "The name of the Network Interface. Changing this forces a new resource to be created."
type = string
}
variable "vm_net_iface_ipconfig_name" {
description = "A name used for this IP Configuration."
type = string
}
variable "vm_net_iface_private_ip_address_allocation" {
description = "The allocation method used for the Private IP Address. Possible values are Dynamic and Static"
type = string
}
variable "vm_name" {
description = "The name of the Linux Virtual Machine. Changing this forces a new resource to be created."
type = string
}
variable "vm_computer_name" {
description = "The hostname of the Linux Virtual Machine. Changing this forces a new resource to be created."
type = string
}
variable "vm_size" {
description = "The SKU which should be used for this Virtual Machine, such as Standard_F2."
type = map(any)
}
variable "vm_admin_username" {
description = "The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created."
type = string
}
variable "vm_os_disk_caching" {
description = "The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite."
type = string
}
variable "vm_os_disk_size_gb" {
description = "The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from."
type = map(any)
}
variable "vm_storage_account_type" {
description = "The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created."
type = string
}
variable "vm_source_image_publisher" {
description = "Specifies the publisher of the image used to create the virtual machines."
type = string
}
variable "vm_source_image_offer" {
description = "Specifies the offer of the image used to create the virtual machines. az vm image list --all --publisher Canonical --offer 0001-com-ubuntu-server-jammy-daily --output table"
type = string
}
variable "vm_source_image_sku" {
description = "Specifies the SKU of the image used to create the virtual machines."
type = string
}
variable "vm_source_image_verson" {
description = "Specifies the version of the image used to create the virtual machines."
type = string
}
variable "cr_name" {
description = "Name for the container registry"
type = string
}
variable "cr_sku" {
description = "SKU for the container registry: Basic, Standard and Premium."
type = map(any)
}
variable "kv_name" {
description = "Name for the keyvault"
type = string
}
variable "kv_sku_name" {
description = "SKU of the keyvault service: standard and premium"
type = map(any)
}
variable "kv_key_name" {
description = "Name for the ssh key"
type = string
}
variable "kv_key_type" {
description = "type of key to create: Possible values are EC (Elliptic Curve), EC-HSM, Oct (Octet), RSA and RSA-HSM"
type = string
}
variable "kv_key_size" {
description = "Specifies the Size of the RSA key to create in bytes. For example, 1024 or 2048. Note: This field is required if key_type is RSA or RSA-HSM"
type = number
}
variable "storage_acct_name" {
description = "Storage account name for the account that will hold out logs/backups"
type = string
}
variable "account_tier" {
description = "logging storage account tier: Defines the Tier to use for this storage account. Valid options are Standard and Premium. For BlockBlobStorage and FileStorage accounts only Premium is valid. Changing this forces a new resource to be created."
type = map(any)
}
variable "account_replication_type" {
description = " Defines the type of replication to use for this storage account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. Changing this forces a new resource to be created when types LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice versa."
type = string
}
variable "log_storage_tier" {
description = "Defines the access tier for BlobStorage, FileStorage and StorageV2 accounts. Valid options are Hot and Cool, defaults to Hot"
type = string
}
variable "tenant_id" {
description = "value"
type = string
}
variable "subscription_id" {
description = "value"
type = string
}
variable "public_network_access_enabled" {
description = "decide if the contaier registry will be locked dow or not, only available on premium tier"
}
variable "logs_enabled" {
description = "Logs on/off"
type = bool
default = true
}
variable "admin_users" {
description = "object_id's for users /groups that will get admin access to things"
type = list(string)
}