test out arrow on aws

Noah Masur 2024-05-04 23:05:55 -04:00
parent c0356cab62
commit 8f0f7911b0
10 changed files with 341 additions and 98 deletions

.github/workflows/arrow-aws.yml (new file)

@@ -0,0 +1,154 @@
name: Arrow (AWS)

run-name: Arrow (AWS) - ${{ inputs.rebuild && 'Rebuild and ' || '' }}${{ inputs.action == 'create' && 'Create' || ( inputs.action == 'destroy' && 'Destroy' || 'No Action' ) }}

env:
  TERRAFORM_DIRECTORY: hosts/arrow/aws
  DEPLOY_IDENTITY_BASE64: ${{ secrets.DEPLOY_IDENTITY_BASE64 }}
  ARROW_IDENTITY_BASE64: ${{ secrets.ARROW_IDENTITY_BASE64 }}
  ZONE_NAME: masu.rs
  CLOUDFLARE_API_TOKEN: ${{ secrets.CLOUDFLARE_API_TOKEN }}
  CLOUDFLARE_ZONE_ID: ${{ secrets.CLOUDFLARE_ZONE_ID }}

on:
  workflow_dispatch:
    inputs:
      rebuild:
        type: boolean
        default: false
      action:
        type: choice
        required: true
        default: create
        options:
          - create
          - destroy
          - nothing
      size:
        type: choice
        required: false
        options:
          - t3a.small # 2 GB RAM / $10

jobs:
  build-deploy:
    name: Build and Deploy
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo Code
        uses: actions/checkout@v4

      # Enable access to KVM, required to build an image
      - name: Enable KVM group perms
        if: inputs.rebuild && inputs.action != 'destroy'
        run: |
          echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules
          sudo udevadm control --reload-rules
          sudo udevadm trigger --name-match=kvm

      # Login to AWS
      - name: AWS Assume Role
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: github_actions_admin
          aws-region: us-east-1

      # Install Nix
      - name: Install Nix
        if: inputs.rebuild && inputs.action != 'destroy'
        uses: cachix/install-nix-action@v17

      # Build the image
      - name: Build Image
        if: inputs.rebuild && inputs.action != 'destroy'
        run: nix build .#arrow-aws

      # # Copy the image to S3
      # - name: Upload Image to Cache
      #   env:
      #     NIX_CACHE_PRIVATE_KEY: ${{ secrets.NIX_CACHE_PRIVATE_KEY }}
      #   run: |
      #     echo "$NIX_CACHE_PRIVATE_KEY" > cache.key
      #     nix store sign --key-file cache.key $(readlink result)
      #     nix copy --to s3://t2-aws-nixos-test $(readlink result)
      #     rm cache.key

      # Installs the Terraform binary and some accessory functions.
      - name: Setup Terraform
        uses: hashicorp/setup-terraform@v2

      # Checks whether the Terraform code is formatted properly. If this
      # fails, you should install the pre-commit hook.
      - name: Check Formatting
        working-directory: ${{ env.TERRAFORM_DIRECTORY }}
        run: |
          terraform fmt -no-color -check -diff -recursive

      # Connects to the remote state backend and downloads providers.
      - name: Terraform Init
        working-directory: ${{ env.TERRAFORM_DIRECTORY }}
        run: terraform init

      # Deploys infrastructure or changes to infrastructure.
      - name: Terraform Apply
        if: inputs.action == 'create'
        working-directory: ${{ env.TERRAFORM_DIRECTORY }}
        env:
          TF_VAR_terraform_state_bucket: ${{ secrets.TERRAFORM_STATE_BUCKET }}
          TF_VAR_terraform_state_key: arrow.tfstate
          TF_VAR_ec2_size: ${{ inputs.size }}
        run: |
          terraform apply \
            -auto-approve \
            -input=false

      # Removes infrastructure.
      - name: Terraform Destroy
        if: inputs.action == 'destroy'
        working-directory: ${{ env.TERRAFORM_DIRECTORY }}
        run: |
          terraform destroy \
            -auto-approve \
            -input=false

      - name: Get Host IP
        if: inputs.action == 'create'
        id: host
        working-directory: ${{ env.TERRAFORM_DIRECTORY }}
        run: terraform output -raw host_ip

      - name: Wait on SSH
        if: inputs.action == 'create'
        run: |
          for i in $(seq 1 15); do
            # nc exits 0 once port 22 accepts connections
            if nc -z -w 3 ${{ steps.host.outputs.stdout }} 22; then
              exit 0
            fi
            sleep 10
          done
          echo "Timed out waiting for SSH" >&2
          exit 1

      - name: Write Identity Keys to Files
        if: inputs.action == 'create'
        run: |
          echo "${{ env.DEPLOY_IDENTITY_BASE64 }}" | base64 -d > deploy_ed25519
          chmod 0600 deploy_ed25519
          echo "${{ env.ARROW_IDENTITY_BASE64 }}" | base64 -d > arrow_ed25519
          chmod 0600 arrow_ed25519

      - name: Copy Identity File to Host
        if: inputs.action == 'create'
        run: |
          ssh -i deploy_ed25519 -o StrictHostKeyChecking=accept-new noah@${{ steps.host.outputs.stdout }} 'mkdir -pv .ssh'
          scp -i deploy_ed25519 arrow_ed25519 noah@${{ steps.host.outputs.stdout }}:~/.ssh/id_ed25519

      - name: Wipe Records
        if: ${{ inputs.action == 'destroy' }}
        run: |
          RECORD_ID=$(curl --request GET \
            --url https://api.cloudflare.com/client/v4/zones/${{ env.CLOUDFLARE_ZONE_ID }}/dns_records \
            --header 'Content-Type: application/json' \
            --header "Authorization: Bearer ${{ env.CLOUDFLARE_API_TOKEN }}" | jq -r '.result[] | select(.name == "transmission.${{ env.ZONE_NAME }}") | .id')
          curl --request DELETE \
            --url https://api.cloudflare.com/client/v4/zones/${{ env.CLOUDFLARE_ZONE_ID }}/dns_records/${RECORD_ID} \
            --header 'Content-Type: application/json' \
            --header "Authorization: Bearer ${{ env.CLOUDFLARE_API_TOKEN }}"

flake.nix

@@ -327,6 +327,25 @@
       format = "iso";
       modules = import ./hosts/arrow/modules.nix { inherit inputs globals overlays; };
     };
+    x86_64-linux.arrow-aws = inputs.nixos-generators.nixosGenerate {
+      system = "x86_64-linux";
+      format = "amazon";
+      modules = import ./hosts/arrow/modules.nix { inherit inputs globals overlays; } ++ [
+        # import
+        # ./modules/aws
+        # { inherit inputs; }
+        (
+          { ... }:
+          {
+            boot.kernelPackages = inputs.nixpkgs.legacyPackages.x86_64-linux.linuxKernel.packages.linux_6_6;
+            amazonImage.sizeMB = 16 * 1024;
+            permitRootLogin = "prohibit-password";
+            boot.loader.systemd-boot.enable = inputs.nixpkgs.lib.mkForce false;
+            boot.loader.efi.canTouchEfiVariables = inputs.nixpkgs.lib.mkForce false;
+          }
+        )
+      ];
+    };
     # Package Neovim config into standalone package
     x86_64-linux.neovim = neovim "x86_64-linux";
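
The new arrow-aws output can also be built locally for testing; a rough sketch (nixos-generators drops the Amazon-format image into result/, which the Terraform code later globs for):

# Build the Amazon-format image defined by the flake output above
nix build .#arrow-aws

# The VHD that hosts/arrow/aws/image.tf expects should now exist
ls result/nixos-amazon-image-*.vhd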

hosts/arrow/aws/ec2.tf (new file)

@@ -0,0 +1,47 @@
resource "aws_instance" "instance" {
  ami                    = aws_ami.image.id
  instance_type          = var.ec2_size
  vpc_security_group_ids = [aws_security_group.instance.id]

  tags = merge(local.default_tags, {
    Name = "aws-nixos"
  })

  lifecycle {
    create_before_destroy = true
  }
}

data "aws_vpc" "vpc" {
  default = true
}

resource "aws_security_group" "instance" {
  name        = "t2-aws-nixos-test"
  description = "Allow SSH and HTTPS"
  vpc_id      = data.aws_vpc.vpc.id

  ingress {
    description = "SSH"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTPS"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}
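
After an apply, the instance can be sanity-checked from the AWS CLI; a hedged example, assuming credentials for the same account in us-east-1:

# Find the instance by its Name tag and print its public IP
aws ec2 describe-instances \
  --filters "Name=tag:Name,Values=aws-nixos" "Name=instance-state-name,Values=running" \
  --query "Reservations[].Instances[].PublicIpAddress" \
  --output text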

hosts/arrow/aws/image.tf (new file)

@@ -0,0 +1,80 @@
locals {
  image_file = one(fileset(path.root, "result/nixos-amazon-image-*.vhd"))
}

# Upload image to S3
resource "aws_s3_object" "image" {
  bucket = var.images_bucket
  key    = basename(local.image_file)
  source = local.image_file
  etag   = filemd5(local.image_file)
}

# Setup IAM access for the VM Importer
data "aws_iam_policy_document" "vmimport_trust_policy" {
  statement {
    actions = ["sts:AssumeRole"]
    principals {
      type        = "Service"
      identifiers = ["vmie.amazonaws.com"]
    }
  }
}

data "aws_iam_policy_document" "vmimport" {
  statement {
    actions = [
      "s3:GetBucketLocation",
      "s3:GetObject",
      "s3:ListBucket",
    ]
    resources = [
      "arn:aws:s3:::${aws_s3_object.image.bucket}",
      "arn:aws:s3:::${aws_s3_object.image.bucket}/*",
    ]
  }
  statement {
    actions = [
      "ec2:ModifySnapshotAttribute",
      "ec2:CopySnapshot",
      "ec2:RegisterImage",
      "ec2:Describe*",
    ]
    resources = ["*"]
  }
}

resource "aws_iam_role" "vmimport" {
  name               = "vmimport"
  assume_role_policy = data.aws_iam_policy_document.vmimport_trust_policy.json
  inline_policy {
    name   = "vmimport"
    policy = data.aws_iam_policy_document.vmimport.json
  }
}

# Import to EBS
resource "aws_ebs_snapshot_import" "image" {
  disk_container {
    format = "VHD"
    user_bucket {
      s3_bucket = aws_s3_object.image.bucket
      s3_key    = aws_s3_object.image.key
    }
  }
  role_name = aws_iam_role.vmimport.name
}

# Convert to AMI
resource "aws_ami" "image" {
  description         = "Created with NixOS."
  name                = replace(basename(local.image_file), "/\\.vhd$/", "")
  virtualization_type = "hvm"
  ebs_block_device {
    device_name = "/dev/xvda"
    snapshot_id = aws_ebs_snapshot_import.image.id
    volume_size = 8
  }
}
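
The chain above (S3 object, then EBS snapshot import, then AMI) mirrors what the AWS CLI exposes, which is handy when an import fails; a hedged sketch with placeholder bucket, key, and task id:

# Start a snapshot import from the uploaded VHD
aws ec2 import-snapshot \
  --disk-container "Format=VHD,UserBucket={S3Bucket=my-images-bucket,S3Key=nixos-amazon-image.vhd}"

# Poll the task until Status reports "completed"
aws ec2 describe-import-snapshot-tasks --import-task-ids import-snap-0123456789abcdef0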

hosts/arrow/aws/main.tf

@@ -1,80 +1,15 @@
-locals {
-  image_file = one(fileset(path.root, "result/nixos-amazon-image-*.vhd"))
-}
-# Upload to S3
-resource "aws_s3_object" "image" {
-  bucket = "your_bucket_name"
-  key    = basename(local.image_file)
-  source = local.image_file
-  etag   = filemd5(local.image_file)
-}
-# Setup IAM access for the VM Importer
-data "aws_iam_policy_document" "vmimport_trust_policy" {
-  statement {
-    actions = ["sts:AssumeRole"]
-    principals {
-      type        = "Service"
-      identifiers = ["vmie.amazonaws.com"]
-    }
-  }
-}
-data "aws_iam_policy_document" "vmimport" {
-  statement {
-    actions = [
-      "s3:GetBucketLocation",
-      "s3:GetObject",
-      "s3:ListBucket",
-    ]
-    resources = [
-      "arn:aws:s3:::${aws_s3_object.image.bucket}",
-      "arn:aws:s3:::${aws_s3_object.image.bucket}/*",
-    ]
-  }
-  statement {
-    actions = [
-      "ec2:ModifySnapshotAttribute",
-      "ec2:CopySnapshot",
-      "ec2:RegisterImage",
-      "ec2:Describe*",
-    ]
-    resources = ["*"]
-  }
-}
-resource "aws_iam_role" "vmimport" {
-  name               = "vmimport"
-  assume_role_policy = data.aws_iam_policy_document.vmimport_trust_policy.json
-  inline_policy {
-    name   = "vmimport"
-    policy = data.aws_iam_policy_document.vmimport.json
-  }
-}
-# Import to EBS
-resource "aws_ebs_snapshot_import" "image" {
-  disk_container {
-    format = "VHD"
-    user_bucket {
-      s3_bucket = aws_s3_object.image.bucket
-      s3_key    = aws_s3_object.image.key
-    }
-  }
-  role_name = aws_iam_role.vmimport.name
-}
-# Convert to AMI
-resource "aws_ami" "image" {
-  description         = "Created with NixOS."
-  name                = replace(basename(local.image_file), "/\\.vhd$/", "")
-  virtualization_type = "hvm"
-  ebs_block_device {
-    device_name = "/dev/xvda"
-    snapshot_id = aws_ebs_snapshot_import.image.id
-    volume_size = 8
-  }
-}
+terraform {
+  backend "s3" {
+    bucket         = var.terraform_state_bucket
+    key            = var.terraform_state_key
+    region         = "us-east-1"
+    dynamodb_table = "terraform-state-lock"
+  }
+  required_version = ">= 1.0.0"
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "5.42.0"
+    }
+  }
+}
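
One caveat: Terraform does not interpolate variables inside backend blocks, so var.terraform_state_bucket and var.terraform_state_key will be rejected at init time; the usual workaround is partial backend configuration, sketched here with placeholder values:

# Leave bucket/key unset in the backend block and inject them at init
terraform init \
  -backend-config="bucket=my-terraform-state-bucket" \
  -backend-config="key=arrow.tfstate"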

hosts/arrow/aws/variables.tf (new file)

@@ -0,0 +1,20 @@
variable "ec2_size" {
  type        = string
  description = "Size of instance to launch"
  default     = "t3a.small" # 2 GB RAM ($14/mo)
}

variable "images_bucket" {
  description = "Name of the bucket in which to store the NixOS VM images."
  type        = string
}

variable "terraform_state_bucket" {
  description = "Name of the bucket in which to store the Terraform state information."
  type        = string
}

variable "terraform_state_key" {
  description = "Path of the file in which to store the Terraform state information."
  type        = string
}
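
For local plans these can be supplied the same way the workflow does it, through TF_VAR_ environment variables; a small sketch with placeholder values:

export TF_VAR_images_bucket="my-images-bucket"
export TF_VAR_terraform_state_bucket="my-terraform-state-bucket"
export TF_VAR_terraform_state_key="arrow.tfstate"
terraform plan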

hosts/arrow/modules.nix

@@ -22,9 +22,8 @@
   cloudflare.enable = true;
   services.openssh.enable = true;
   services.caddy.enable = true;
-  services.transmission.enable = true;
-  # nix-index seems to each up too much memory for Vultr
+  # nix-index seems to eat up too much memory for Vultr
   home-manager.users.${globals.user}.programs.nix-index.enable = inputs.nixpkgs.lib.mkForce false;
   virtualisation.vmVariant = {

modules/aws/default.nix

@@ -1,15 +1,9 @@
 { ... }:
 {
-  config = {
-    # AWS settings require this
-    permitRootLogin = "prohibit-password";
-    # Make sure disk size is large enough
-    # https://github.com/nix-community/nixos-generators/issues/150
-    formatConfigs.amazon =
-      { config, ... }:
-      {
-        amazonImage.sizeMB = 16 * 1024;
-      };
-  };
+  # AWS settings require this
+  permitRootLogin = "prohibit-password";
+  # Make sure disk size is large enough
+  # https://github.com/nix-community/nixos-generators/issues/150
+  amazonImage.sizeMB = 16 * 1024;
 }
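
The 16 * 1024 value pins the generated image at 16 GB; one way to confirm the built artifact matches, assuming qemu-img is installed locally:

# Inspect the virtual size of the generated VHD
qemu-img info result/nixos-amazon-image-*.vhd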


@@ -1,12 +1,7 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
-}:
+{ config, lib, ... }:
 {
   # Enable fstrim, which tracks free space on SSDs for garbage collection
   # More info: https://www.reddit.com/r/NixOS/comments/rbzhb1/if_you_have_a_ssd_dont_forget_to_enable_fstrim/
-  services.fstrim.enable = true;
+  services.fstrim.enable = lib.mkIf config.physical true;
 }


@@ -29,7 +29,7 @@
       GatewayPorts = "no";
       X11Forwarding = false;
       PasswordAuthentication = false;
-      PermitRootLogin = config.permitRootLogin;
+      PermitRootLogin = lib.mkDefault config.permitRootLogin;
     };
   };
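
With mkDefault, a host can now override PermitRootLogin without resorting to mkForce. To check what sshd actually ends up enforcing on a deployed host, one option (assuming sudo on the instance):

# Dump the effective sshd configuration and filter for the root-login policy
sudo sshd -T | grep -i permitrootlogin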