diff --git a/azure/terraform/cassandra.plan b/azure/terraform/cassandra.plan
new file mode 100644
index 0000000000000000000000000000000000000000..9dcba614cb424ea7102a24c81c4fadf360d90a9a
Binary files /dev/null and b/azure/terraform/cassandra.plan differ
diff --git a/azure/terraform/cassandra.tf b/azure/terraform/cassandra.tf
new file mode 100644
index 0000000000000000000000000000000000000000..113cb8fc6beac3175d6e792215db981c21efe69d
--- /dev/null
+++ b/azure/terraform/cassandra.tf
@@ -0,0 +1,179 @@
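+# Sizing knobs for the Cassandra fleet: number of virtual machines, size of
+# each data disk in GB, and number of data disks attached to each server.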
+variable "cassandra_servers" {
+  default = 1
+}
+
+variable "cassandra_disk_size" {
+  default = 1024
+}
+
+variable "cassandra_disks_per_server" {
+  default = 4
+}
+
+resource "azurerm_resource_group" "euwest-cassandra" {
+  name     = "euwest-cassandra"
+  location = "westeurope"
+
+  tags = {
+    environment = "Cassandra"
+  }
+}
+
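+# Map of server name ("cassandra01", ...) to its data disk layout: each disk
+# gets a LUN and the stable udev path Azure exposes for that LUN.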
+locals {
+  cassandra_servers = {
+    for i in range(var.cassandra_servers):
+    format("cassandra%02d", i + 1) => {
+      datadisks = {
+        for d in range(var.cassandra_disks_per_server):
+        format("datadisk%02d", d + 1) => {
+          lun  = d + 1
+          path = format("/dev/disk/azure/scsi1/lun%d", d + 1)
+        }
+      }
+    }
+  }
+}
+
+
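+# One NIC per Cassandra server, attached to the shared default subnet and
+# worker network security group, with accelerated networking enabled.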
+resource "azurerm_network_interface" "cassandra-interface" {
+  for_each                      = local.cassandra_servers
+
+  name                          = format("%s-interface", each.key)
+  location                      = "westeurope"
+  resource_group_name           = azurerm_resource_group.euwest-cassandra.name
+  network_security_group_id     = data.azurerm_network_security_group.worker-nsg.id
+
+  enable_accelerated_networking = true
+
+  ip_configuration {
+    name                          = "cassandraNicConfiguration"
+    subnet_id                     = data.azurerm_subnet.default.id
+    public_ip_address_id          = ""
+    private_ip_address_allocation = "Dynamic"
+  }
+
+  depends_on                = [azurerm_resource_group.euwest-cassandra]
+}
+
+
+resource "azurerm_virtual_machine" "cassandra-server" {
+  for_each              = local.cassandra_servers
+
+  depends_on            = [azurerm_resource_group.euwest-cassandra]
+
+  name                  = each.key
+  location              = "westeurope"
+  resource_group_name   = azurerm_resource_group.euwest-cassandra.name
+  network_interface_ids = [azurerm_network_interface.cassandra-interface[each.key].id]
+  vm_size               = "Standard_DS13_v2"
+
+  delete_os_disk_on_termination    = true
+  delete_data_disks_on_termination = true
+
+  boot_diagnostics {
+    enabled     = true
+    storage_uri = var.boot_diagnostics_uri
+  }
+
+  storage_os_disk {
+    name              = format("%s-osdisk", each.key)
+    caching           = "ReadWrite"
+    create_option     = "FromImage"
+    managed_disk_type = "Premium_LRS"
+  }
+
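+  # Generate one storage_data_disk block per entry in this server's datadisks map.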
+  dynamic "storage_data_disk" {
+    for_each = each.value.datadisks
+
+    content {
+      name              = format("%s-%s", each.key, storage_data_disk.key)
+      caching           = "None"
+      create_option     = "Empty"
+      managed_disk_type = "Premium_LRS"
+      disk_size_gb      = var.cassandra_disk_size
+      lun               = storage_data_disk.value.lun
+    }
+  }
+
+  storage_image_reference {
+    publisher = "debian"
+    offer     = "debian-10"
+    sku       = "10"
+    version   = "latest"
+  }
+
+  os_profile {
+    computer_name  = each.key
+    admin_username = var.user_admin
+  }
+
+  os_profile_linux_config {
+    disable_password_authentication = true
+    ssh_keys {
+      path     = "/home/${var.user_admin}/.ssh/authorized_keys"
+      key_data = var.ssh_key_data_olasd
+    }
+  }
+
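+  # Seed root's authorized_keys over SSH as the admin user, so the following
+  # provisioners can connect directly as root.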
+  provisioner "remote-exec" {
+    inline = [
+      "sudo mkdir /root/.ssh",
+      "echo ${var.ssh_key_data_ardumont} | sudo tee -a /root/.ssh/authorized_keys",
+      "echo ${var.ssh_key_data_olasd} | sudo tee -a /root/.ssh/authorized_keys",
+    ]
+
+    connection {
+      type = "ssh"
+      user = var.user_admin
+      host = azurerm_network_interface.cassandra-interface[self.name].private_ip_address
+    }
+  }
+
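+  # Render and upload the firstboot script: one GPT partition per data disk,
+  # assembled into a RAID0 array mounted on /srv/cassandra.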
+  provisioner "file" {
+    content     = templatefile("templates/firstboot.sh.tpl", {
+      hostname   = self.name
+      fqdn       = format("%s.euwest.azure.internal.softwareheritage.org", self.name)
+      ip_address = azurerm_network_interface.cassandra-interface[self.name].private_ip_address
+      facter_location = "azure_euwest",
+      disks = [
+        for disk in local.cassandra_servers[self.name].datadisks: {
+          base_disk = disk.path
+        }
+      ]
+      raids = [{
+        path          = "/dev/md0"
+        level         = 0
+        chunk         = "128K"
+        members       = [for disk in local.cassandra_servers[self.name].datadisks: format("%s-part1", disk.path)]
+        mountpoint    = "/srv/cassandra"
+        filesystem    = "ext4"
+        mount_options = "defaults"
+      }]
+    })
+    destination = var.firstboot_script
+
+    connection {
+      type = "ssh"
+      user = "root"
+      host = azurerm_network_interface.cassandra-interface[self.name].private_ip_address
+    }
+  }
+
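+  # Drop the temporary admin user, then run the uploaded firstboot script as root.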
+  provisioner "remote-exec" {
+    inline = [
+      "userdel -f ${var.user_admin}",
+      "chmod +x ${var.firstboot_script}",
+      "cat ${var.firstboot_script}",
+      "${var.firstboot_script}",
+    ]
+    connection {
+      type = "ssh"
+      user = "root"
+      host = azurerm_network_interface.cassandra-interface[self.name].private_ip_address
+    }
+  }
+
+  tags = {
+    environment = "Cassandra"
+  }
+}
diff --git a/azure/terraform/templates/firstboot.sh.tpl b/azure/terraform/templates/firstboot.sh.tpl
index 77057e782cac08982811c45b4ffeb431f83e38b5..9a2cb9b54031dbdbd82f4e033ead72af087d04df 100644
--- a/azure/terraform/templates/firstboot.sh.tpl
+++ b/azure/terraform/templates/firstboot.sh.tpl
@@ -12,19 +12,8 @@ FQDN=${fqdn}
 IP=${ip_address}
 FACTER_LOCATION=${facter_location}
 
-%{ for disk in disks }
-# Make one large partition on ${disk.base_disk}
-echo ';' | sudo sfdisk --label gpt ${disk.base_disk}
-
-mkfs.${disk.filesystem} ${disk.base_disk}1
-
-mkdir -p ${disk.mountpoint}
-
-uuid=$(blkid -o value -s UUID ${disk.base_disk}1)
-echo "UUID=\"$uuid\" ${disk.mountpoint} ${disk.filesystem} ${disk.mount_options} 0 0" >> /etc/fstab
-%{ endfor }
 
-mount -a
+# Handle base system configuration
 
 apt-get -y install lsb-release
 debian_suite=$(lsb_release -cs)
@@ -43,9 +32,53 @@ echo $HOSTNAME > /etc/hostname
 hostnamectl set-hostname $HOSTNAME
 echo "$IP $FQDN $HOSTNAME" >> /etc/hosts
 
+
+# Handle disk configuration
+
+%{ for disk in disks }
+# Make one large partition on ${disk.base_disk}
+echo ';' | sudo sfdisk --label gpt ${disk.base_disk}
+
+%{ if lookup(disk, "filesystem", "") != "" }
+mkfs.${disk.filesystem} ${disk.base_disk}1
+
+mkdir -p ${disk.mountpoint}
+
+uuid=$(blkid -o value -s UUID ${disk.base_disk}1)
+echo "UUID=\"$uuid\" ${disk.mountpoint} ${disk.filesystem} ${disk.mount_options} 0 0" >> /etc/fstab
+%{ endif }
+%{ endfor }
+
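+# Assemble the requested RAID arrays; mdadm is only installed when at least
+# one array is defined.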
+%{ if length(raids) != 0 }
+
+apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --no-install-recommends install mdadm
+
+%{ for raid in raids }
+
+mdadm --create ${raid.path} \
+      --level=${raid.level} \
+      --raid-devices ${length(raid.members)} \
+      %{ if raid.chunk != "" }--chunk=${raid.chunk}%{ endif } \
+      %{~ for member in raid.members } ${member} %{ endfor ~}
+
+%{ if lookup(raid, "filesystem", "") != "" }
+mkfs.${raid.filesystem} ${raid.path}
+
+mkdir -p ${raid.mountpoint}
+
+uuid=$(blkid -o value -s UUID ${raid.path})
+echo "UUID=\"$uuid\" ${raid.mountpoint} ${raid.filesystem} ${raid.mount_options} 0 0" >> /etc/fstab
+%{ endif }
+%{ endfor }
+
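+# Persist the array definitions and rebuild the initramfs so the arrays are
+# assembled at boot.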
+/usr/share/mdadm/mkconf > /etc/mdadm/mdadm.conf
+update-initramfs -k all -u
+%{ endif }
+
+mount -a
+
 # install puppet dependencies
-apt-get -y install -t $${debian_suite}-backports facter
-apt-get -y install puppet
+apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --no-install-recommends install puppet gnupg
 
 # do not need the service live as we need to install some more setup first
 service puppet stop
@@ -58,8 +91,12 @@ echo location=$FACTER_LOCATION > /etc/facter/facts.d/location.txt
 # first time around, this will:
 # - update the node's puppet agent configuration defining the puppet master
 # - generate the certificates with the appropriate fqdn
-# - unfortunately, for now, this fails though, when not being able to
-#   install the apt-transport-https package
-puppet agent --server $PUPPET_MASTER --waitforcert 60 --test
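+# With --detailed-exitcodes, puppet exits 2 when changes were applied
+# successfully, so 2 is treated as success and any other non-zero code aborts.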
+puppet_exit=0
+
+puppet agent --server $PUPPET_MASTER --waitforcert 60 --test --vardir /var/lib/puppet --detailed-exitcodes || puppet_exit=$?
+
+if [ $puppet_exit -ne 2 ]; then
+    exit $puppet_exit
+fi
 
-#reboot
+# reboot