diff --git a/.github/workflows/arrow-aws.yml b/.github/workflows/arrow-aws.yml
index dba1ac8..81e11a9 100644
--- a/.github/workflows/arrow-aws.yml
+++ b/.github/workflows/arrow-aws.yml
@@ -67,25 +67,18 @@ jobs:
       - name: Install Nix
         if: inputs.rebuild && inputs.action != 'destroy'
         uses: cachix/install-nix-action@v20
-        with:
-          extra_nix_config: |
-            substituters = s3://${{ secrets.NIX_CACHE_BUCKET }} https://cache.nixos.org/
-            trusted-public-keys = ${{ secrets.NIX_CACHE_BUCKET }}:KJLT83NgyLjjX+YOJxFNflmw3/IPwW+y21cpgVv+Kwc= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
 
       # Build the image
       - name: Build Image
         if: inputs.rebuild && inputs.action != 'destroy'
         run: nix build .#arrow-aws
 
-      # Copy the image to S3
-      - name: Upload Image to Cache
-        env:
-          NIX_CACHE_PRIVATE_KEY: ${{ secrets.NIX_CACHE_PRIVATE_KEY }}
+      - name: Upload Image to S3
+        if: inputs.rebuild && inputs.action != 'destroy'
         run: |
-          echo "$NIX_CACHE_PRIVATE_KEY" > cache.key
-          nix store sign --key-file cache.key $(readlink result)
-          nix copy --to s3://${{ secrets.NIX_CACHE_BUCKET }} $(readlink result)
-          rm cache.key
+          aws s3 cp \
+            result/nixos-amazon-image-*.vhd \
+            s3://${{ secrets.IMAGES_BUCKET }}/arrow.vhd
 
       # Installs the Terraform binary and some other accessory functions.
       - name: Setup Terraform
diff --git a/hosts/arrow/aws/ec2.tf b/hosts/arrow/aws/ec2.tf
index 43f3c9c..8842de8 100644
--- a/hosts/arrow/aws/ec2.tf
+++ b/hosts/arrow/aws/ec2.tf
@@ -17,7 +17,7 @@ data "aws_vpc" "vpc" {
 }
 
 resource "aws_security_group" "instance" {
-  name        = "t2-aws-nixos-test"
+  name        = "aws-nixos"
   description = "Allow SSH and HTTPS"
   vpc_id      = data.aws_vpc.vpc.id
 
diff --git a/hosts/arrow/aws/image.tf b/hosts/arrow/aws/image.tf
index ebf7f0c..3560dd1 100644
--- a/hosts/arrow/aws/image.tf
+++ b/hosts/arrow/aws/image.tf
@@ -1,13 +1,19 @@
-locals {
-  image_file = one(fileset(path.root, "../../../result/nixos-amazon-image-*.vhd"))
-}
+# locals {
+#   image_file = one(fileset(path.root, "../../../result/nixos-amazon-image-*.vhd"))
+# }
+#
+# # Upload image to S3
+# resource "aws_s3_object" "image" {
+#   bucket = var.images_bucket
+#   key    = basename(local.image_file)
+#   source = local.image_file
+#   etag   = filemd5(local.image_file)
+# }
 
-# Upload image to S3
-resource "aws_s3_object" "image" {
+# Use existing image in S3
+data "aws_s3_object" "image" {
   bucket = var.images_bucket
-  key    = basename(local.image_file)
-  source = local.image_file
-  etag   = filemd5(local.image_file)
+  key    = "arrow.vhd"
 }
 
 # Setup IAM access for the VM Importer
@@ -29,8 +35,8 @@ data "aws_iam_policy_document" "vmimport" {
       "s3:ListBucket",
     ]
     resources = [
-      "arn:aws:s3:::${aws_s3_object.image.bucket}",
-      "arn:aws:s3:::${aws_s3_object.image.bucket}/*",
+      "arn:aws:s3:::${data.aws_s3_object.image.bucket}",
+      "arn:aws:s3:::${data.aws_s3_object.image.bucket}/*",
     ]
   }
   statement {
@@ -58,8 +64,8 @@ resource "aws_ebs_snapshot_import" "image" {
   disk_container {
     format = "VHD"
     user_bucket {
-      s3_bucket = aws_s3_object.image.bucket
-      s3_key    = aws_s3_object.image.key
+      s3_bucket = data.aws_s3_object.image.bucket
+      s3_key    = data.aws_s3_object.image.key
     }
   }