summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/apply.rs87
-rw-r--r--src/autoscaling.rs34
-rw-r--r--src/aws_context.rs48
-rw-r--r--src/bin/aws-autoscaling-dns/main.rs68
-rw-r--r--src/converge.rs85
-rw-r--r--src/dns.rs75
-rw-r--r--src/ec2.rs83
-rw-r--r--src/hashable.rs37
-rw-r--r--src/lib.rs10
-rw-r--r--src/result.rs4
-rw-r--r--src/route53.rs143
-rw-r--r--src/single.rs24
12 files changed, 698 insertions, 0 deletions
diff --git a/src/apply.rs b/src/apply.rs
new file mode 100644
index 0000000..86e92b4
--- /dev/null
+++ b/src/apply.rs
@@ -0,0 +1,87 @@
+use aws_sdk_route53::types::{Change, ChangeAction, ChangeBatch, ResourceRecordSet};
+
+use crate::result::Result;
+use crate::route53::Route53;
+
/// Whether computed DNS changes should be printed or actually applied.
pub enum ApplyMode {
    /// Print the target zone and pending changes without touching Route53.
    DryRun,
    /// Submit the changes to Route53.
    Apply,
}
+
+impl ApplyMode {
+ pub async fn apply<C, R, I>(
+ &self,
+ aws_context: &C,
+ zone_id: &str,
+ remove_records: R,
+ insert_records: I,
+ ) -> Result<()>
+ where
+ C: Route53,
+ R: IntoIterator<Item = ResourceRecordSet>,
+ I: IntoIterator<Item = ResourceRecordSet>,
+ {
+ match self {
+ ApplyMode::DryRun => dry_run(zone_id, remove_records, insert_records).await,
+ ApplyMode::Apply => apply(aws_context, zone_id, remove_records, insert_records).await,
+ }
+ }
+}
+
+async fn dry_run<R, I>(zone_id: &str, remove_records: R, insert_records: I) -> Result<()>
+where
+ R: IntoIterator<Item = ResourceRecordSet>,
+ I: IntoIterator<Item = ResourceRecordSet>,
+{
+ let remove_records: Vec<_> = remove_records.into_iter().collect();
+ let insert_records: Vec<_> = insert_records.into_iter().collect();
+
+ println!("ZONE: {}", zone_id);
+ println!("REMOVE: {:#?}", remove_records);
+ println!("INSERT: {:#?}", insert_records);
+
+ Ok(())
+}
+
+async fn apply<C, R, I>(
+ aws_context: &C,
+ zone_id: &str,
+ remove_records: R,
+ insert_records: I,
+) -> Result<()>
+where
+ C: Route53,
+ R: IntoIterator<Item = ResourceRecordSet>,
+ I: IntoIterator<Item = ResourceRecordSet>,
+{
+ let mut change_batch = ChangeBatch::builder();
+ for remove_record in remove_records {
+ let change = Change::builder()
+ .action(ChangeAction::Delete)
+ .resource_record_set(remove_record)
+ .build();
+ change_batch = change_batch.changes(change);
+ }
+ for insert_record in insert_records {
+ let change = Change::builder()
+ .action(ChangeAction::Create)
+ .resource_record_set(insert_record)
+ .build();
+ change_batch = change_batch.changes(change);
+ }
+ let change_batch = change_batch.build();
+
+ if let Some(changes) = change_batch.changes() {
+ if !changes.is_empty() {
+ aws_context
+ .route53()
+ .change_resource_record_sets()
+ .hosted_zone_id(zone_id)
+ .change_batch(change_batch)
+ .send()
+ .await?;
+ }
+ }
+
+ Ok(())
+}
diff --git a/src/autoscaling.rs b/src/autoscaling.rs
new file mode 100644
index 0000000..6c96279
--- /dev/null
+++ b/src/autoscaling.rs
@@ -0,0 +1,34 @@
+use anyhow::anyhow;
+use aws_sdk_autoscaling as autoscaling;
+use aws_sdk_autoscaling::types::AutoScalingGroup;
+
+use crate::result::Result;
+use crate::single::Single;
+
/// Provides access to an AWS Auto Scaling SDK client.
pub trait AutoScaling {
    /// Returns the underlying Auto Scaling client.
    fn autoscaling(&self) -> &autoscaling::Client;
}
+
+pub async fn asg_by_name<C>(aws_context: &C, name: &str) -> Result<AutoScalingGroup>
+where
+ C: AutoScaling,
+{
+ let asg_resp = aws_context
+ .autoscaling()
+ .describe_auto_scaling_groups()
+ .auto_scaling_group_names(name)
+ .send()
+ .await?;
+
+ let auto_scaling_groups = asg_resp.auto_scaling_groups().unwrap_or(&[]);
+ let auto_scaling_group = auto_scaling_groups
+ .iter()
+ .map(ToOwned::to_owned)
+ .single()
+ .ok_or(anyhow!(
+ "No unique autoscaling group found with name: {}",
+ name
+ ))?;
+
+ Ok(auto_scaling_group)
+}
diff --git a/src/aws_context.rs b/src/aws_context.rs
new file mode 100644
index 0000000..2eff941
--- /dev/null
+++ b/src/aws_context.rs
@@ -0,0 +1,48 @@
+use aws_sdk_autoscaling as asg;
+use aws_sdk_ec2 as ec2;
+use aws_sdk_route53 as route53;
+use aws_types::SdkConfig;
+
+use crate::autoscaling::AutoScaling;
+use crate::ec2::Ec2;
+use crate::route53::Route53;
+
/// Bundle of AWS SDK clients, all built from one shared configuration.
pub struct AwsContext {
    // One client per service this tool talks to.
    asg: asg::Client,
    ec2: ec2::Client,
    route53: route53::Client,
}
+
impl AwsContext {
    /// Build all clients from the ambient AWS configuration
    /// (environment variables, shared config files, instance metadata, ...).
    pub async fn from_env() -> Self {
        let config = aws_config::from_env().load().await;

        Self::new(&config)
    }

    /// Construct every service client from a single shared `SdkConfig`.
    fn new(config: &SdkConfig) -> Self {
        Self {
            asg: asg::Client::new(config),
            ec2: ec2::Client::new(config),
            route53: route53::Client::new(config),
        }
    }
}
+
// Expose each client through its capability trait so functions can request
// only the services they actually use (e.g. `C: Ec2 + Route53`).
impl AutoScaling for AwsContext {
    fn autoscaling(&self) -> &asg::Client {
        &self.asg
    }
}

impl Ec2 for AwsContext {
    fn ec2(&self) -> &ec2::Client {
        &self.ec2
    }
}

impl Route53 for AwsContext {
    fn route53(&self) -> &route53::Client {
        &self.route53
    }
}
diff --git a/src/bin/aws-autoscaling-dns/main.rs b/src/bin/aws-autoscaling-dns/main.rs
new file mode 100644
index 0000000..739f0c8
--- /dev/null
+++ b/src/bin/aws-autoscaling-dns/main.rs
@@ -0,0 +1,68 @@
+use std::fmt::Debug;
+
+use clap::Parser;
+use trust_dns_proto::rr::Name;
+
+use aws_autoscaling_dns::apply::ApplyMode;
+use aws_autoscaling_dns::aws_context::AwsContext;
+use aws_autoscaling_dns::converge::named_asg_changes;
+use aws_autoscaling_dns::result::Result;
+
+/// Synchronize a DNS entry with an autoscaling group's running instances.
+///
+/// The given DNS name's A and AAAA records in Route53 will be rewritten to exactly
+/// match the list of pending and in-service EC2 instances in the specified
+/// autoscaling group. Records of other types (including CNAMEs) will not be
+/// modified, so this can be used alongside DNS ACME verification, SPF, and other
+/// DNS applications.
// NOTE: the `///` field docs below are clap help text (user-visible output),
// so they must not be reworded casually.
#[derive(Parser, Debug)]
struct Args {
    /// The name of the autoscaling group to synchronize.
    #[arg(long)]
    autoscaling_group: String,

    /// The DNS domain name to synchronize. The most specific Route53 zone that
    /// contains this name will be modified.
    #[arg(long)]
    dns_name: Name,

    /// The TTL (in seconds) for newly-created records.
    #[arg(long, default_value_t = 300)]
    dns_ttl: i64,

    /// Print the affected zone ID and pending changes, without applying them (default).
    // `conflicts_with` makes clap reject `--dry-run --apply` passed together.
    #[arg(long, conflicts_with = "apply")]
    dry_run: bool,

    /// Apply the changes to Route53.
    #[arg(long)]
    apply: bool,
}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+ let args = Args::parse();
+ let name = args.autoscaling_group;
+
+ let aws_context = AwsContext::from_env().await;
+
+ let apply_mode = if args.dry_run {
+ ApplyMode::DryRun
+ } else if args.apply {
+ ApplyMode::Apply
+ } else {
+ ApplyMode::DryRun
+ };
+
+ let changes = named_asg_changes(&aws_context, &name, &args.dns_name, args.dns_ttl).await?;
+ apply_mode
+ .apply(
+ &aws_context,
+ &changes.zone_id,
+ changes.remove,
+ changes.insert,
+ )
+ .await?;
+
+ Ok(())
+}
diff --git a/src/converge.rs b/src/converge.rs
new file mode 100644
index 0000000..073e9e6
--- /dev/null
+++ b/src/converge.rs
@@ -0,0 +1,85 @@
+use std::fmt::Debug;
+
+use anyhow::anyhow;
+use aws_sdk_autoscaling::types::AutoScalingGroup;
+use aws_sdk_route53::types::ResourceRecordSet;
+use futures::try_join;
+use trust_dns_proto::rr::Name;
+
+use crate::autoscaling::{asg_by_name, AutoScaling};
+use crate::dns::AutoScalingGroupConfig;
+use crate::ec2::{instance_recordsets, Ec2};
+use crate::hashable::Hashable;
+use crate::result::Result;
+use crate::route53::{zone_for_domain, zone_suffix_recordsets, Route53};
+
/// The record-set edits needed to make a hosted zone match an autoscaling
/// group, plus the zone they apply to.
#[derive(Debug)]
pub struct Changes<T> {
    // Route53 hosted zone ID the changes target.
    pub zone_id: String,
    // Record sets present in the zone but not intended — to be deleted.
    pub remove: T,
    // Record sets intended but not yet present — to be created.
    pub insert: T,
}
+
+async fn changes<C>(
+ aws_context: &C,
+ auto_scaling_group: &AutoScalingGroup,
+ dns_name: &Name,
+ dns_ttl: i64,
+) -> Result<Changes<impl IntoIterator<Item = ResourceRecordSet> + Debug>>
+where
+ C: Ec2 + Route53,
+{
+ let AutoScalingGroupConfig {
+ name: asg_name,
+ live_instance_ids,
+ } = AutoScalingGroupConfig::try_from(auto_scaling_group)?;
+
+ let zone = zone_for_domain(dns_name, aws_context).await?;
+ let zone_id = zone
+ .id()
+ .ok_or(anyhow!("No ID for hosted zone for name: {}", dns_name))?;
+
+ let (intended_records, actual_records) = try_join!(
+ instance_recordsets(
+ &asg_name,
+ dns_name,
+ dns_ttl,
+ &live_instance_ids,
+ aws_context
+ ),
+ zone_suffix_recordsets(dns_name, zone_id, aws_context),
+ )?;
+
+ let remove_records = actual_records.difference(&intended_records);
+ let insert_records = intended_records.difference(&actual_records);
+
+ let remove_records = remove_records.map(Hashable::as_ref);
+ let insert_records = insert_records.map(Hashable::as_ref);
+
+ let remove_records = remove_records.map(ToOwned::to_owned);
+ let insert_records = insert_records.map(ToOwned::to_owned);
+
+ let remove_records: Vec<_> = remove_records.collect();
+ let insert_records: Vec<_> = insert_records.collect();
+
+ Ok(Changes {
+ zone_id: zone_id.into(),
+ remove: remove_records,
+ insert: insert_records,
+ })
+}
+
+pub async fn named_asg_changes<C>(
+ aws_context: &C,
+ name: &str,
+ dns_name: &Name,
+ dns_ttl: i64,
+) -> Result<Changes<impl IntoIterator<Item = ResourceRecordSet> + Debug>>
+where
+ C: AutoScaling + Ec2 + Route53,
+{
+ let auto_scaling_group = asg_by_name(aws_context, name).await?;
+
+ let changes = changes(aws_context, &auto_scaling_group, dns_name, dns_ttl).await?;
+ Ok(changes)
+}
diff --git a/src/dns.rs b/src/dns.rs
new file mode 100644
index 0000000..13e3650
--- /dev/null
+++ b/src/dns.rs
@@ -0,0 +1,75 @@
+use std::convert::TryFrom;
+use std::fmt::Debug;
+
+use anyhow::anyhow;
+use aws_sdk_autoscaling::types::{AutoScalingGroup, Instance, LifecycleState};
+use trust_dns_proto::rr::Name;
+
+use crate::result::{Error, Result};
+
/// The subset of an autoscaling group's state this tool cares about.
#[derive(Debug)]
pub struct AutoScalingGroupConfig {
    // The group's name, as reported by AWS.
    pub name: String,
    // IDs of instances that are pending or in service (should receive DNS).
    pub live_instance_ids: Vec<String>,
}
+
+impl AutoScalingGroupConfig {
+ fn live_instance_ids<'a>(instances: impl IntoIterator<Item = &'a Instance>) -> Vec<String> {
+ instances
+ .into_iter()
+ .filter(|instance| match instance.lifecycle_state() {
+ // See <https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html>
+ //
+ // Include pending instances so that they can obtain certs, etc.
+ Some(LifecycleState::Pending) => true,
+ Some(LifecycleState::PendingWait) => true,
+ Some(LifecycleState::PendingProceed) => true,
+ Some(LifecycleState::InService) => true,
+ _ => false,
+ })
+ .flat_map(|instance| instance.instance_id())
+ .map(ToOwned::to_owned)
+ .collect()
+ }
+}
+
+impl TryFrom<&AutoScalingGroup> for AutoScalingGroupConfig {
+ type Error = Error;
+
+ fn try_from(autoscaling_group: &AutoScalingGroup) -> Result<Self> {
+ let name = autoscaling_group
+ .auto_scaling_group_name()
+ .ok_or(anyhow!("Autoscaling group returned from AWS with no name"))?
+ .to_owned();
+
+ let instances = autoscaling_group.instances().unwrap_or(&[]);
+
+ let live_instance_ids = Self::live_instance_ids(instances);
+
+ Ok(Self {
+ name,
+ live_instance_ids,
+ })
+ }
+}
+
+pub fn suffixes(mut name: Name) -> Vec<Name> {
+ let mut names = Vec::new();
+
+ loop {
+ names.push(name.clone());
+ if name.is_root() {
+ break;
+ } else {
+ name = name.base_name();
+ }
+ }
+
+ names
+}
+
+pub fn absolute(name: Name) -> Result<Name> {
+ let root = &Name::root();
+ let absolute = name.append_name(root)?;
+ Ok(absolute)
+}
diff --git a/src/ec2.rs b/src/ec2.rs
new file mode 100644
index 0000000..28169e7
--- /dev/null
+++ b/src/ec2.rs
@@ -0,0 +1,83 @@
+use std::collections::HashSet;
+
+use aws_sdk_ec2 as ec2;
+use aws_sdk_ec2::types::Filter;
+use aws_sdk_route53::types::{ResourceRecordSet, RrType};
+use tokio_stream::StreamExt;
+use trust_dns_proto::rr::Name;
+
+use crate::dns::absolute;
+use crate::hashable::Hashable;
+use crate::result::Result;
+use crate::route53::recordset;
+
/// Provides access to an AWS EC2 SDK client.
pub trait Ec2 {
    /// Returns the underlying EC2 client.
    fn ec2(&self) -> &ec2::Client;
}
+
+pub async fn instance_recordsets<C>(
+ asg_name: &str,
+ dns_suffix: &Name,
+ dns_ttl: i64,
+ live_instance_ids: &[String],
+ aws_context: &C,
+) -> Result<HashSet<Hashable<ResourceRecordSet>>>
+where
+ C: Ec2,
+{
+ // If there's nothing running, then (a) we don't need to ask AWS about
+ // running instances, and (b) we can't anyways as the API call requires at
+ // least one instance ID. Abort here.
+ if live_instance_ids.is_empty() {
+ return Ok(HashSet::new());
+ }
+
+ let asg_filter = Filter::builder()
+ .name("tag:aws:autoscaling:groupName")
+ .values(asg_name)
+ .build();
+
+ let mut apex_ip4 = HashSet::new();
+ let mut apex_ip6 = HashSet::new();
+
+ let mut instances_paginator = aws_context
+ .ec2()
+ .describe_instances()
+ .set_instance_ids(Some(live_instance_ids.to_owned()))
+ .filters(asg_filter)
+ .into_paginator()
+ .items()
+ .send();
+
+ while let Some(reservation) = instances_paginator.try_next().await? {
+ let instances = reservation.instances().unwrap_or(&[]);
+ for instance in instances {
+ // Mild abuse of the fact that optional values are also iterable
+ apex_ip4.extend(instance.public_ip_address().map(String::from));
+
+ let instance_interfaces = instance.network_interfaces().unwrap_or(&[]);
+ let instance_ip6: Vec<_> = instance_interfaces
+ .iter()
+ .flat_map(|interface| interface.ipv6_addresses().unwrap_or(&[]))
+ // Flatmap here to drop the None values, unwrap the Some values
+ .flat_map(|ipv6| ipv6.ipv6_address())
+ .map(String::from)
+ .collect();
+
+ apex_ip6.extend(instance_ip6.iter().map(ToOwned::to_owned).map(String::from));
+ }
+ }
+
+ let apex_hostname = absolute(dns_suffix.clone())?;
+ let apex_hostname = apex_hostname.to_ascii();
+
+ let mut asg_recordsets = HashSet::new();
+ if !apex_ip4.is_empty() {
+ asg_recordsets.insert(recordset(&apex_hostname, dns_ttl, RrType::A, apex_ip4).into());
+ }
+ if !apex_ip6.is_empty() {
+ asg_recordsets.insert(recordset(&apex_hostname, dns_ttl, RrType::Aaaa, apex_ip6).into());
+ }
+
+ Ok(asg_recordsets)
+}
diff --git a/src/hashable.rs b/src/hashable.rs
new file mode 100644
index 0000000..22bcdd3
--- /dev/null
+++ b/src/hashable.rs
@@ -0,0 +1,37 @@
+use std::fmt::Debug;
+use std::hash::{Hash, Hasher};
+
+use aws_sdk_route53::types::ResourceRecordSet;
+
/// A hand-rolled hash for types (such as SDK models) that don't derive
/// `Hash`.
///
/// Implementations must be consistent with the type's `PartialEq`: equal
/// values must hash equally. A coarser hash than equality is fine.
pub trait SimpleHash {
    fn hash<H: Hasher>(&self, state: &mut H);
}
+
/// Newtype that makes a `SimpleHash` type usable in `HashSet`/`HashMap` by
/// forwarding `Hash` to the inner type's `SimpleHash` implementation.
#[derive(Debug, PartialEq, Clone)]
pub struct Hashable<T>(T);

impl<T> AsRef<T> for Hashable<T> {
    /// Borrow the wrapped value.
    fn as_ref(&self) -> &T {
        &self.0
    }
}

// `Eq` adds no methods; it simply asserts reflexive equality on top of the
// wrapped type's `PartialEq`.
impl<T> Eq for Hashable<T> where Hashable<T>: PartialEq {}

impl<T: SimpleHash> Hash for Hashable<T> {
    // Delegate to the inner type's SimpleHash so set membership works.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}

impl<T> From<T> for Hashable<T> {
    /// Wrap any value; `.into()` is the usual entry point.
    fn from(value: T) -> Self {
        Self(value)
    }
}
+
// Hash only the record name — deliberately coarse but valid: equal record
// sets necessarily share a name, so equal values hash equally. Sets that
// differ only in type or values collide and fall back to the full
// `PartialEq` comparison inside the hash set.
impl SimpleHash for ResourceRecordSet {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.name().hash(state)
    }
}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..ccfa2ac
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,10 @@
pub mod apply; // Applying (or dry-run printing) computed DNS changes.
mod autoscaling; // Auto Scaling group lookup.
pub mod aws_context; // Shared bundle of AWS SDK clients.
pub mod converge; // Diffing intended vs. actual record sets.
mod dns; // DNS name helpers and ASG config extraction.
mod ec2; // Intended record sets from running instances.
mod hashable; // Hash wrapper for SDK types without `Hash`.
pub mod result; // Crate-wide Result/Error aliases.
mod route53; // Hosted zone lookup and record listing.
mod single; // "Exactly one element" iterator helper.
diff --git a/src/result.rs b/src/result.rs
new file mode 100644
index 0000000..7c607c5
--- /dev/null
+++ b/src/result.rs
@@ -0,0 +1,4 @@
// NOTE(review): `use anyhow;` is a redundant single-component import
// (clippy::single_component_path_imports); the crate is addressable without
// it. Kept as-is here.
use anyhow;

// Crate-wide error type: anyhow's dynamic error.
pub type Error = anyhow::Error;
// Crate-wide result alias with the error defaulted to `Error`.
pub type Result<T> = std::result::Result<T, Error>;
diff --git a/src/route53.rs b/src/route53.rs
new file mode 100644
index 0000000..22e4126
--- /dev/null
+++ b/src/route53.rs
@@ -0,0 +1,143 @@
+use std::collections::HashSet;
+use std::str::FromStr;
+
+use anyhow::anyhow;
+use aws_sdk_route53 as route53;
+use aws_sdk_route53::types::{HostedZone, ResourceRecord, ResourceRecordSet, RrType};
+use trust_dns_proto::rr::Name;
+
+use crate::dns::suffixes;
+use crate::hashable::Hashable;
+use crate::result::Result;
+
/// Provides access to an AWS Route53 SDK client.
pub trait Route53 {
    /// Returns the underlying Route53 client.
    fn route53(&self) -> &route53::Client;
}
+
+pub async fn zone_for_domain<C>(name: &Name, aws_context: &C) -> Result<HostedZone>
+where
+ C: Route53,
+{
+ let names = suffixes(name.clone());
+
+ // Outer pagination loop needs to be pulled out to a trait - this is some hot nonsense.
+ let mut zone = None;
+ let mut depth = None;
+ let mut zones_marker = None;
+ loop {
+ let zones_resp = aws_context
+ .route53()
+ .list_hosted_zones()
+ .set_marker(zones_marker)
+ .send()
+ .await?;
+
+ let zones = zones_resp.hosted_zones().unwrap_or(&[]);
+ for candidate_zone in zones.iter() {
+ let zone_name = match candidate_zone.name() {
+ None => continue,
+ Some(name) => name,
+ };
+ let zone_name = Name::from_str(zone_name)?;
+ let match_position = names.iter().position(|name| *name == zone_name);
+ match (depth, match_position) {
+ (None, Some(matched_depth)) => {
+ zone = Some(candidate_zone.clone());
+ depth = Some(matched_depth);
+ }
+ (Some(found_depth), Some(matched_depth)) => {
+ if matched_depth < found_depth {
+ zone = Some(candidate_zone.clone());
+ depth = Some(matched_depth);
+ }
+ }
+ (_, _) => {}
+ }
+ }
+
+ if zones_resp.is_truncated() {
+ zones_marker = zones_resp.next_marker().map(String::from);
+ } else {
+ break;
+ }
+ }
+
+ zone.ok_or(anyhow!("No Route53 zone found for DNS suffix: {}", name))
+}
+
+pub async fn zone_suffix_recordsets<C>(
+ dns_suffix: &Name,
+ zone_id: &str,
+ aws_context: &C,
+) -> Result<HashSet<Hashable<ResourceRecordSet>>>
+where
+ C: Route53,
+{
+ let mut suffix_records = HashSet::new();
+
+ let mut next_record_name = Some(dns_suffix.to_ascii());
+ let mut next_record_type = None;
+ let mut next_record_identifier = None;
+
+ loop {
+ let records_resp = aws_context
+ .route53()
+ .list_resource_record_sets()
+ .hosted_zone_id(zone_id)
+ .set_start_record_name(next_record_name)
+ .set_start_record_type(next_record_type)
+ .set_start_record_identifier(next_record_identifier)
+ .send()
+ .await?;
+
+ let recordsets = records_resp.resource_record_sets().unwrap_or(&[]);
+ for recordset in recordsets {
+ let recordset_name = recordset.name().ok_or(anyhow!(
+ "Record set with no name found in zone: {}",
+ zone_id
+ ))?;
+ let recordset_name = Name::from_str(recordset_name)?;
+ let recordset_names = suffixes(recordset_name);
+
+ if !recordset_names.iter().any(|name| name == dns_suffix) {
+ break;
+ }
+
+ suffix_records.insert(recordset.clone().into());
+ }
+
+ if records_resp.is_truncated() {
+ next_record_name = records_resp.next_record_name().map(String::from);
+ next_record_type = records_resp.next_record_type().map(Clone::clone);
+ next_record_identifier = records_resp.next_record_identifier().map(String::from);
+ } else {
+ break;
+ }
+ }
+
+ Ok(suffix_records)
+}
+
+pub fn recordset<I, S>(
+ apex_hostname: &str,
+ dns_ttl: i64,
+ rr_type: RrType,
+ addresses: I,
+) -> ResourceRecordSet
+where
+ I: IntoIterator<Item = S>,
+ S: Into<String>,
+{
+ let apex_ip4_records = addresses
+ .into_iter()
+ .map(|address| address.into())
+ .map(|address| ResourceRecord::builder().value(address).build())
+ .collect();
+
+ ResourceRecordSet::builder()
+ .name(apex_hostname)
+ .r#type(rr_type)
+ .ttl(dns_ttl)
+ .set_resource_records(Some(apex_ip4_records))
+ .build()
+}
diff --git a/src/single.rs b/src/single.rs
new file mode 100644
index 0000000..b3f0b18
--- /dev/null
+++ b/src/single.rs
@@ -0,0 +1,24 @@
/// Extension trait: extract the sole element of a collection, if any.
pub trait Single {
    type Item;

    /// Consume `self` and return its only element, or `None` when it holds
    /// zero elements or more than one.
    fn single(self) -> Option<Self::Item>;
}

impl<T, I> Single for I
where
    I: IntoIterator<Item = T>,
{
    type Item = T;

    fn single(self) -> Option<Self::Item> {
        let mut items = self.into_iter();
        // "Exactly one" means: a first element exists and a second does not.
        match (items.next(), items.next()) {
            (first @ Some(_), None) => first,
            _ => None,
        }
    }
}