sops config

pull/2/head
Skyler 3 months ago
parent 5b5d04e128
commit ba29dd99e8
  1. 97
      .config.sample.env
  2. 12
      .sops.yaml
  3. 13
      .vscode/extensions.json
  4. 18
      .vscode/settings.json
  5. 28
      cluster/config/cluster-secrets.sops.yaml
  6. 8
      cluster/config/cluster-settings.yaml
  7. 501
      configure
  8. 13
      provision/ansible/inventory/group_vars/kubernetes/k3s.yml
  9. 30
      provision/ansible/inventory/group_vars/master/k3s.yml
  10. 3
      provision/ansible/inventory/group_vars/worker/k3s.yml
  11. 4
      provision/ansible/inventory/hosts.yml
  12. 4
      provision/ansible/playbooks/cluster-nuke.yml
  13. 101
      provision/terraform/cloudflare/main.tf
  14. 11
      tmpl/cluster/cluster-settings.yaml

@ -1,97 +0,0 @@
#
# Cluster related variables
#
# The repo you created from this template
# e.g. https://github.com/onedr0p/home-cluster
export BOOTSTRAP_GIT_REPOSITORY=""
# Optional: Enable GitHub to send a webhook to update Flux. Set the following
# to one of:
#   ignored   - this feature will be disabled
#   generated - a token will be generated and printed in the logs
# Any other string will be used verbatim as the webhook secret
export BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET="ignored"
# Choose one of your cloudflare domains
# e.g. onedr0p.com
export BOOTSTRAP_CLOUDFLARE_DOMAIN=""
# The email you use to sign into Cloudflare with
export BOOTSTRAP_CLOUDFLARE_EMAIL=""
# Your global Cloudflare API Key
export BOOTSTRAP_CLOUDFLARE_APIKEY=""
# Pick a range of unused IPs that are on the same network as your nodes.
# You don't need many IPs, just choose 10 IPs to start with
# e.g. 192.168.1.220-192.168.1.230
export BOOTSTRAP_METALLB_LB_RANGE=""
# The load balancer IP for k8s_gateway, chosen from the available IPs above
# e.g. 192.168.1.220
export BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR=""
# The load balancer IP for the ingress controller, chosen from the available
# IPs above and not conflicting with any other IP address here
# e.g. 192.168.1.221
export BOOTSTRAP_METALLB_INGRESS_ADDR=""
# Age Public Key - the string should start with "age"
# e.g. age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta
export BOOTSTRAP_AGE_PUBLIC_KEY=""
# Enable / Disable kube-vip
# If this is set to false (e.g. on a cluster with a single-node control
# plane), kube-vip will not be installed.
# BOOTSTRAP_KUBE_VIP_ADDR still needs to be set to the IP the API Server is
# reachable under
export BOOTSTRAP_KUBE_VIP_ENABLED="true"
# The IP Address to use with kube-vip
# Pick an unused IP that is on the same network as your nodes,
# outside the ${BOOTSTRAP_METALLB_LB_RANGE} range,
# and not conflicting with any other IP address here
# e.g. 192.168.1.254
export BOOTSTRAP_KUBE_VIP_ADDR=""
# Choose your timezone
# Used to set your systems timezone and the Kured timezone
# e.g. America/New_York
export BOOTSTRAP_TIMEZONE="Etc/UTC"
#
# Ansible related variables
#
#
# Default prefixes for hostnames assigned by Ansible.
# These are unused on nodes where BOOTSTRAP_ANSIBLE_HOSTNAME_<n> is provided
#
export BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX="k8s-"
export BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX="k8s-"
#
# Ansible hosts - repeat this block as many times as you need,
# incrementing the trailing digit on the variable name for each node
#
# Host IP Address of the control plane node,
# not conflicting with any other IP address here
# e.g. 192.168.1.200
export BOOTSTRAP_ANSIBLE_HOST_ADDR_0=""
# User Ansible will log into the nodes as
export BOOTSTRAP_ANSIBLE_SSH_USERNAME_0=""
# Password Ansible will use to escalate to sudo
export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_0=""
# Set this node as a control node (true/false)
export BOOTSTRAP_ANSIBLE_CONTROL_NODE_0=""
# Optional: Set the hostname of the node; if set this overrides the
# *_HOSTNAME_PREFIX vars above
export BOOTSTRAP_ANSIBLE_HOSTNAME_0=""
# export BOOTSTRAP_ANSIBLE_HOST_ADDR_1=""
# export BOOTSTRAP_ANSIBLE_SSH_USERNAME_1=""
# export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_1=""
# export BOOTSTRAP_ANSIBLE_CONTROL_NODE_1=""
# export BOOTSTRAP_ANSIBLE_HOSTNAME_1=""
# export BOOTSTRAP_ANSIBLE_HOST_ADDR_2=""
# export BOOTSTRAP_ANSIBLE_SSH_USERNAME_2=""
# export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_2=""
# export BOOTSTRAP_ANSIBLE_CONTROL_NODE_2=""
# export BOOTSTRAP_ANSIBLE_HOSTNAME_2=""

@ -0,0 +1,12 @@
---
creation_rules:
- path_regex: provision/.*\.sops\.ya?ml
unencrypted_regex: "^(kind)$"
key_groups:
- age:
- age1elkkx5sqdyvkd8fev4mq9mgxqswcvn99kjxvy6cwzgv3ay8dfa0s9rpaem
- path_regex: cluster/.*\.sops\.ya?ml
encrypted_regex: "^(data|stringData)$"
key_groups:
- age:
- age1elkkx5sqdyvkd8fev4mq9mgxqswcvn99kjxvy6cwzgv3ay8dfa0s9rpaem

@ -1,13 +0,0 @@
{
"recommendations": [
"HashiCorp.terraform",
"britesnow.vscode-toggle-quotes",
"mitchdenny.ecdc",
"ms-kubernetes-tools.vscode-kubernetes-tools",
"oderwat.indent-rainbow",
"redhat.ansible",
"signageos.signageos-vscode-sops",
"usernamehw.errorlens",
"fcrespo82.markdown-table-formatter"
]
}

@ -1,18 +0,0 @@
{
"ansible.ansibleLint.arguments": "-c .github/linters/.ansible-lint",
"files.associations": {
"*.json5": "jsonc",
"**/provision/ansible/**/*.yml": "ansible",
"**/provision/ansible/**/*.sops.yml": "yaml",
"**/provision/ansible/**/inventory/**/*.yml": "yaml",
"**/provision/terraform/**/*.tf": "terraform"
},
"prettier.configPath": ".github/linters/.prettierrc.yaml",
"prettier.ignorePath": ".github/linters/.prettierignore",
"yaml.schemas": {
"Kubernetes": "cluster/*.yaml"
},
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs":"active",
"editor.hover.delay": 1500
}

@ -0,0 +1,28 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: cluster-secrets
namespace: flux-system
stringData:
SECRET_DOMAIN: ENC[AES256_GCM,data:X1k9AItzPv6n,iv:NmlRl9IkKoqaBfK3S6ejXqRmEJGxn0zrEnxvsxq6kec=,tag:aueB3/aWp7ffpP62jG+BRg==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1elkkx5sqdyvkd8fev4mq9mgxqswcvn99kjxvy6cwzgv3ay8dfa0s9rpaem
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB5WlV2UUJsOFBMM3QwOFpV
OGJYVjdTUHp3L0dPcXoreDR3Qlp4WkdPdkRRClBPSHNuWU5ZNUhqUzVxMVdpd21i
R0dBL0ljamN2aHJEcTVqWTBVNGVkamcKLS0tIGZkVjR2YUhDbmI4SlAyUEdpMytO
b0ZCaEJ1aklBZit4aVNMMFRwRlhHMmMKot9c1nNpIKG+ZoB0j088bC5GmOs9e29/
tPxkSbHID2zZjqLWvQNcgduBpdn9V18DDPI14dwRyPWst6euNd6udA==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-09-12T21:39:16Z"
mac: ENC[AES256_GCM,data:s6Z8Mkrzn168UXB4A5RqsHHssbXdTPToyjxNG1SZ24IevMjxuKg4U2dlmkfBVELkLaYoPUiL8vhDJXJdmfjIbhEek70qNQgs4TLzOD6L6b/g8HXUN+9cb/04lY3xb1hsS49ibH4lD1zVS+Pt/oIek4a5VTmE30fuiIdnAICJcEI=,iv:RPScFFdqeZhg04FNFwidLD/SvzlbMvMvaJsLL9ZNu20=,tag:IImWmympR5RxGF6pXq+2zA==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.1

@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-settings
namespace: flux-system
data:
TIMEZONE: Europe/Helsinki

501
configure vendored

@ -1,501 +0,0 @@
#!/usr/bin/env bash
#
# configure: renders cluster/ansible/terraform files from the values in
# .config.env (templates live under tmpl/), or validates those values when
# invoked with --verify.
#
set -o errexit
set -o pipefail
# Repository root; every path below is addressed relative to it.
# shellcheck disable=SC2155
export PROJECT_DIR=$(git rev-parse --show-toplevel)
# Age private key location used by sops when encrypting the *.sops.yaml files.
# shellcheck disable=SC2155
export SOPS_AGE_KEY_FILE=~/.config/sops/age/keys.txt
# Pull in all BOOTSTRAP_* settings (presumably created from
# .config.sample.env — confirm against the repo README).
# shellcheck disable=SC1091
source "${PROJECT_DIR}/.config.env"
# Print usage information for this script to stdout.
show_help() {
  printf '%s\n' \
    "Usage: $(basename "$0") <options>" \
    "-h, --help Display help" \
    "--verify Verify .config.env settings"
}
# Entry point: check required tooling, then either verify the configuration
# (--verify) or render every template and encrypt the generated secrets.
main() {
  local verify=
  # parse_command_line sets 'verify' (declared local here, assigned in the
  # callee via bash dynamic scoping).
  parse_command_line "$@"
  verify_binaries
  if [[ "${verify}" == 1 ]]; then
    # Verification mode: run every check; verify_success exits 0 at the end.
    verify_ansible_hosts
    verify_metallb
    verify_kubevip
    verify_addressing
    verify_age
    verify_git_repository
    verify_cloudflare
    verify_success
  else
    # generate sops configuration file
    envsubst < "${PROJECT_DIR}/tmpl/.sops.yaml" \
      > "${PROJECT_DIR}/.sops.yaml"
    # generate cluster settings
    envsubst < "${PROJECT_DIR}/tmpl/cluster/cluster-settings.yaml" \
      > "${PROJECT_DIR}/cluster/config/cluster-settings.yaml"
    envsubst < "${PROJECT_DIR}/tmpl/cluster/flux-cluster.yaml" \
      > "${PROJECT_DIR}/cluster/flux/flux-system/flux-cluster.yaml"
    # kube-vip daemonset is only templated when kube-vip is enabled
    if [[ "${BOOTSTRAP_KUBE_VIP_ENABLED}" == "true" ]]; then
      envsubst < "${PROJECT_DIR}/tmpl/cluster/kube-vip-daemonset.yaml" \
        > "${PROJECT_DIR}/cluster/apps/kube-system/kube-vip/daemon-set.yaml"
    fi
    # generate cluster secrets
    envsubst < "${PROJECT_DIR}/tmpl/cluster/cluster-secrets.sops.yaml" \
      > "${PROJECT_DIR}/cluster/config/cluster-secrets.sops.yaml"
    envsubst < "${PROJECT_DIR}/tmpl/cluster/cert-manager-secret.sops.yaml" \
      > "${PROJECT_DIR}/cluster/core/cluster-issuers/secret.sops.yaml"
    envsubst < "${PROJECT_DIR}/tmpl/cluster/cloudflare-ddns-secret.sops.yaml" \
      > "${PROJECT_DIR}/cluster/apps/networking/cloudflare-ddns/secret.sops.yaml"
    envsubst < "${PROJECT_DIR}/tmpl/cluster/external-dns-secret.sops.yaml" \
      > "${PROJECT_DIR}/cluster/apps/networking/external-dns/secret.sops.yaml"
    # encrypt cluster secrets in place, using the .sops.yaml rules written above
    sops --encrypt --in-place "${PROJECT_DIR}/cluster/config/cluster-secrets.sops.yaml"
    sops --encrypt --in-place "${PROJECT_DIR}/cluster/core/cluster-issuers/secret.sops.yaml"
    sops --encrypt --in-place "${PROJECT_DIR}/cluster/apps/networking/cloudflare-ddns/secret.sops.yaml"
    sops --encrypt --in-place "${PROJECT_DIR}/cluster/apps/networking/external-dns/secret.sops.yaml"
    # generate terraform secrets
    envsubst < "${PROJECT_DIR}/tmpl/terraform/secret.sops.yaml" \
      > "${PROJECT_DIR}/provision/terraform/cloudflare/secret.sops.yaml"
    # encrypt terraform secrets
    sops --encrypt --in-place "${PROJECT_DIR}/provision/terraform/cloudflare/secret.sops.yaml"
    # generate ansible settings
    envsubst < "${PROJECT_DIR}/tmpl/ansible/kube-vip.yml" \
      > "${PROJECT_DIR}/provision/ansible/inventory/group_vars/kubernetes/kube-vip.yml"
    # include kube-vip if enabled (each yq append is guarded so reruns are idempotent)
    if [[ ${BOOTSTRAP_KUBE_VIP_ENABLED} == "true" ]]; then
      if [[ $(yq eval --no-doc 'contains({"resources": ["kube-vip"]})' "${PROJECT_DIR}/cluster/apps/kube-system/kustomization.yaml") == false ]]; then
        yq --inplace '.resources += [ "kube-vip" ]' "${PROJECT_DIR}/cluster/apps/kube-system/kustomization.yaml"
      fi
      if [[ $(yq eval --no-doc 'contains({"k3s_server_manifests_templates": ["kube-vip-daemonset.yaml.j2"]})' "${PROJECT_DIR}/provision/ansible/inventory/group_vars/kubernetes/k3s.yml") == false ]]; then
        yq --inplace '.k3s_server_manifests_templates += [ "kube-vip-daemonset.yaml.j2" ]' "${PROJECT_DIR}/provision/ansible/inventory/group_vars/kubernetes/k3s.yml"
      fi
      if [[ $(yq eval --no-doc 'contains({"k3s_server_manifests_urls": [{"url": "https://kube-vip.io/manifests/rbac.yaml", "filename": "kube-vip-rbac.yaml"}]})' "${PROJECT_DIR}/provision/ansible/inventory/group_vars/kubernetes/k3s.yml") == false ]]; then
        yq --inplace '.k3s_server_manifests_urls += [{"url": "https://kube-vip.io/manifests/rbac.yaml", "filename": "kube-vip-rbac.yaml"}]' "${PROJECT_DIR}/provision/ansible/inventory/group_vars/kubernetes/k3s.yml"
      fi
    fi
    # generate ansible hosts file and secrets
    generate_ansible_hosts
    generate_ansible_host_secrets
    setup_github_webhook
    success
  fi
}
# Consume leading options from "$@". Recognizes -h/--help (print usage and
# exit) and --verify (sets 'verify=1' in the caller's scope); stops at the
# first unrecognized argument. Defaults 'verify' to 0 when not requested.
parse_command_line() {
  local current
  while :; do
    current="${1:-}"
    if [[ "${current}" == "-h" || "${current}" == "--help" ]]; then
      show_help
      exit
    elif [[ "${current}" == "--verify" ]]; then
      verify=1
    else
      break
    fi
    shift
  done
  [[ -n "${verify}" ]] || verify=0
}
# Abort the script with an error if the given command is not on $PATH.
_has_binary() {
  if ! command -v "${1}" >/dev/null 2>&1; then
    _log "ERROR" "${1} is not installed or not found in \$PATH"
    exit 1
  fi
}
# Log whether the named optional variable is set.
#   $1 - variable NAME (dereferenced indirectly via ${!option})
# Warns when unset/empty; never aborts.
# Fixed: the original used the `cond && a || b` shortcut as if/else (SC2015),
# which would also run the INFO branch if the WARN _log call ever failed.
_has_optional_envar() {
  local option="${1}"
  if [[ "${!option}" == "" ]]; then
    _log "WARN" "Unset optional variable ${option}"
  else
    _log "INFO" "Found variable '${option}' with value '${!option}'"
  fi
}
# Require that the named variable is set and non-empty; abort otherwise.
#   $1 - variable NAME (dereferenced indirectly via ${!option})
# Fixed: the original used the `cond && a || b` shortcut as if/else (SC2015).
_has_envar() {
  local option="${1}"
  if [[ "${!option}" == "" ]]; then
    _log "ERROR" "Unset variable ${option}"
    exit 1
  fi
  _log "INFO" "Found variable '${option}' with value '${!option}'"
}
# Validate an IPv4 address with ipcalc; abort the script if invalid.
#   $1 - the IP address to check
#   $2 - the variable name, used only in log messages
# ipcalc prints a line starting with "INVALID" for bad input; the awk exit
# status propagates that through the (output-discarded) pipeline.
# Fixed: the fatal invalid-IP path previously logged at "INFO" severity;
# every other fatal path in this script logs "ERROR".
_has_valid_ip() {
  local ip="${1}"
  local variable_name="${2}"
  if ! ipcalc "${ip}" | awk 'BEGIN{FS=":"; is_invalid=0} /^INVALID/ {is_invalid=1; print $1} END{exit is_invalid}' >/dev/null 2>&1; then
    _log "ERROR" "Variable '${variable_name}' has an invalid IP address '${ip}'"
    exit 1
  else
    _log "INFO" "Variable '${variable_name}' has a valid IP address '${ip}'"
  fi
}
# Cross-check every configured IP against the MetalLB range:
#   * both ends of the range must share the same first three octets
#   * node and kube-vip IPs must be in that network but OUTSIDE the range
#   * k8s_gateway and ingress IPs must be INSIDE the range
# NOTE(review): the subnet comparison uses the first three octets, i.e. it
# assumes a /24 network — confirm that matches the documented setup.
# Fixed: the kube-vip subnet error message misspelled "kupe-vip" and read
# "is not the same subnet" instead of "is not in the same subnet".
verify_addressing() {
  local found_kube_vip="false"
  local found_k8s_gateway="false"
  local found_ingress="false"
  # Verify the metallb min and metallb ceiling are in the same network
  metallb_subnet_min=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f1 | cut -d. -f1,2,3)
  metallb_subnet_ceil=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f2 | cut -d. -f1,2,3)
  if [[ "${metallb_subnet_min}" != "${metallb_subnet_ceil}" ]]; then
    _log "ERROR" "The provided MetalLB IP range '${BOOTSTRAP_METALLB_LB_RANGE}' is not in the same subnet"
    exit 1
  fi
  # Verify the node IP addresses are on the same network as the metallb range
  for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do
    node_subnet=$(echo "${!var}" | cut -d. -f1,2,3)
    if [[ "${node_subnet}" != "${metallb_subnet_min}" ]]; then
      _log "ERROR" "The subnet for node '${!var}' is not in the same subnet as the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'"
      exit 1
    fi
  done
  # Verify the kube-vip IP is in the same network as the metallb range
  kubevip_subnet=$(echo "${BOOTSTRAP_KUBE_VIP_ADDR}" | cut -d. -f1,2,3)
  if [[ "${kubevip_subnet}" != "${metallb_subnet_min}" ]]; then
    _log "ERROR" "The subnet for kube-vip '${BOOTSTRAP_KUBE_VIP_ADDR}' is not in the same subnet as the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'"
    exit 1
  fi
  # Walk every address inside the range and record which reserved IPs fall
  # inside it; node IPs inside the range are an immediate error.
  metallb_octet_min=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f1 | cut -d. -f4)
  metallb_octet_ceil=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f2 | cut -d. -f4)
  for (( octet=metallb_octet_min; octet<=metallb_octet_ceil; octet++ )); do
    addr="${metallb_subnet_min}.${octet}"
    if [[ "${addr}" == "${BOOTSTRAP_KUBE_VIP_ADDR}" ]]; then
      found_kube_vip="true"
    fi
    if [[ "${addr}" == "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" ]]; then
      found_k8s_gateway="true"
    fi
    if [[ "${addr}" == "${BOOTSTRAP_METALLB_INGRESS_ADDR}" ]]; then
      found_ingress="true"
    fi
    for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do
      if [[ "${!var}" == "${addr}" ]]; then
        _log "ERROR" "The IP for node '${!var}' should NOT be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'"
        exit 1
      fi
    done
  done
  if [[ "${found_kube_vip}" == "true" ]]; then
    _log "ERROR" "The IP for kube-vip '${BOOTSTRAP_KUBE_VIP_ADDR}' should NOT be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'"
    exit 1
  fi
  if [[ "${found_k8s_gateway}" == "false" ]]; then
    _log "ERROR" "The IP for k8s_gateway '${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}' should be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'"
    exit 1
  fi
  if [[ "${found_ingress}" == "false" ]]; then
    _log "ERROR" "The IP for ingress '${BOOTSTRAP_METALLB_INGRESS_ADDR}' should be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'"
    exit 1
  fi
}
# Validate the Age encryption setup: the public key must look like an age
# key and the private key file must exist where sops will look for it.
# Fixed: the file check previously hard-coded ~/.config/sops/age/keys.txt
# even though SOPS_AGE_KEY_FILE is asserted above and is what sops uses;
# the success message also claimed "Found Age public key" for the file check.
verify_age() {
  _has_envar "BOOTSTRAP_AGE_PUBLIC_KEY"
  _has_envar "SOPS_AGE_KEY_FILE"
  if [[ ! "$BOOTSTRAP_AGE_PUBLIC_KEY" =~ ^age.* ]]; then
    _log "ERROR" "BOOTSTRAP_AGE_PUBLIC_KEY does not start with age"
    exit 1
  else
    _log "INFO" "Age public key is in the correct format"
  fi
  # Check the same path sops will read during encryption.
  if [[ ! -f "${SOPS_AGE_KEY_FILE}" ]]; then
    _log "ERROR" "Unable to find Age key file at '${SOPS_AGE_KEY_FILE}'"
    exit 1
  else
    _log "INFO" "Found Age key file '${SOPS_AGE_KEY_FILE}'"
  fi
}
# Ensure every external tool this script shells out to is installed,
# aborting (via _has_binary) on the first one that is missing.
verify_binaries() {
  local required_tool
  for required_tool in \
    ansible \
    envsubst \
    flux \
    git \
    age \
    helm \
    ipcalc \
    jq \
    yq \
    sops \
    ssh \
    task \
    terraform \
    tflint
  do
    _has_binary "${required_tool}"
  done
}
# Validate the kube-vip configuration: the VIP address must be set and be a
# syntactically valid IP (checked via ipcalc in _has_valid_ip).
# NOTE(review): this runs even when BOOTSTRAP_KUBE_VIP_ENABLED is "false";
# the sample env says the address must be set regardless — confirm intended.
verify_kubevip() {
  _has_envar "BOOTSTRAP_KUBE_VIP_ADDR"
  _has_valid_ip "${BOOTSTRAP_KUBE_VIP_ADDR}" "BOOTSTRAP_KUBE_VIP_ADDR"
}
# Validate the MetalLB configuration: the address range plus both reserved
# load balancer addresses must be set and syntactically valid IPs.
verify_metallb() {
  local ip_floor=
  local ip_ceil=
  _has_envar "BOOTSTRAP_METALLB_LB_RANGE"
  _has_envar "BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR"
  _has_envar "BOOTSTRAP_METALLB_INGRESS_ADDR"
  # The range is written "<floor>-<ceiling>", e.g. 192.168.1.220-192.168.1.230
  ip_floor=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f1)
  ip_ceil=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f2)
  _has_valid_ip "${ip_floor}" "BOOTSTRAP_METALLB_LB_RANGE"
  _has_valid_ip "${ip_ceil}" "BOOTSTRAP_METALLB_LB_RANGE"
  _has_valid_ip "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" "BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR"
  _has_valid_ip "${BOOTSTRAP_METALLB_INGRESS_ADDR}" "BOOTSTRAP_METALLB_INGRESS_ADDR"
}
# Verify that BOOTSTRAP_GIT_REPOSITORY points at a reachable remote.
# GIT_TERMINAL_PROMPT=0 makes git fail instead of interactively prompting
# for credentials; ls-remote output being non-empty means the repo exists.
# NOTE(review): GIT_TERMINAL_PROMPT is unconditionally reset to 1 (not to
# its previous value) and the mktemp -d directory is never removed —
# confirm both are acceptable for this one-shot script.
verify_git_repository() {
  _has_envar "BOOTSTRAP_GIT_REPOSITORY"
  export GIT_TERMINAL_PROMPT=0
  pushd "$(mktemp -d)" >/dev/null 2>&1
  [ "$(git ls-remote "${BOOTSTRAP_GIT_REPOSITORY}" 2> /dev/null)" ] || {
    _log "ERROR" "Unable to find the remote Git repository '${BOOTSTRAP_GIT_REPOSITORY}'"
    exit 1
  }
  popd >/dev/null 2>&1
  export GIT_TERMINAL_PROMPT=1
}
# If BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET is anything other than "ignored",
# template and encrypt the Flux GitHub webhook resources and register the
# "webhooks" entry in the flux-system kustomization.
# NOTE(review): depends on openssl, which verify_binaries does not check.
setup_github_webhook() {
  WEBHOOK_SECRET="${BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET:-ignored}"
  if [[ "${WEBHOOK_SECRET}" != "ignored" ]]; then
    # "generated" means: create a random secret on the fly.
    if [[ "${WEBHOOK_SECRET}" == "generated" ]]; then
      WEBHOOK_SECRET="$(openssl rand -base64 30)"
    fi
    # Re-export so envsubst below sees the final value.
    export BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET="${WEBHOOK_SECRET}"
    # This intentionally prints the secret (see .config.sample.env:
    # "generated - this will generate a token and print it in the logs").
    _log "INFO" "Using GitHub Token '${WEBHOOK_SECRET}' for Flux"
    cp -rf "${PROJECT_DIR}/tmpl/cluster/flux-system/webhooks" "${PROJECT_DIR}/cluster/apps/flux-system"
    envsubst < "${PROJECT_DIR}/tmpl/cluster/flux-system/webhooks/github/secret.sops.yaml" \
      > "${PROJECT_DIR}/cluster/apps/flux-system/webhooks/github/secret.sops.yaml"
    sops --encrypt --in-place "${PROJECT_DIR}/cluster/apps/flux-system/webhooks/github/secret.sops.yaml"
    # Append "webhooks" to the kustomization only once (idempotent rerun).
    if [[ $(yq eval --no-doc 'contains({"resources": ["webhooks"]})' "${PROJECT_DIR}/cluster/apps/flux-system/kustomization.yaml") == false ]]; then
      yq --inplace '.resources += [ "webhooks" ]' "${PROJECT_DIR}/cluster/apps/flux-system/kustomization.yaml"
    fi
  fi
}
# Verify the Cloudflare credentials by querying the zones API for the
# configured domain and checking the JSON "success" field.
# NOTE(review): the API key is passed on the curl command line, so it is
# briefly visible in process listings on a shared machine — confirm
# acceptable for this workstation-only script.
verify_cloudflare() {
  local account_zone=
  local errors=
  _has_envar "BOOTSTRAP_CLOUDFLARE_APIKEY"
  _has_envar "BOOTSTRAP_CLOUDFLARE_DOMAIN"
  _has_envar "BOOTSTRAP_CLOUDFLARE_EMAIL"
  # Try to retrieve zone information from Cloudflare's API
  account_zone=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=${BOOTSTRAP_CLOUDFLARE_DOMAIN}&status=active" \
    -H "X-Auth-Email: ${BOOTSTRAP_CLOUDFLARE_EMAIL}" \
    -H "X-Auth-Key: ${BOOTSTRAP_CLOUDFLARE_APIKEY}" \
    -H "Content-Type: application/json"
  )
  if [[ "$(echo "${account_zone}" | jq ".success")" == "true" ]]; then
    _log "INFO" "Verified Cloudflare Account and Zone information"
  else
    # Surface the API's own error list to make failures diagnosable.
    errors=$(echo "${account_zone}" | jq -c ".errors")
    _log "ERROR" "Unable to get Cloudflare Account and Zone information ${errors}"
    exit 1
  fi
}
# Validate every BOOTSTRAP_ANSIBLE_HOST_ADDR_<n> node definition: required
# companion variables are set, node IPs do not collide with the kube-vip /
# k8s_gateway / ingress addresses, and passwordless SSH works.
# Fixed: log-message defects ("successfull" typo and a missing space before
# "was NOT successful").
verify_ansible_hosts() {
  local node_id=
  local node_addr=
  local node_username=
  local node_password=
  local node_control=
  local node_hostname=
  local default_control_node_prefix=
  local default_worker_node_prefix=
  default_control_node_prefix="BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX"
  default_worker_node_prefix="BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX"
  _has_optional_envar "${default_control_node_prefix}"
  _has_optional_envar "${default_worker_node_prefix}"
  for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do
    # Node index is the 5th "_"-separated field of the variable name.
    node_id=$(echo "${var}" | awk -F"_" '{print $5}')
    node_addr="BOOTSTRAP_ANSIBLE_HOST_ADDR_${node_id}"
    node_username="BOOTSTRAP_ANSIBLE_SSH_USERNAME_${node_id}"
    node_password="BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_${node_id}"
    node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}"
    node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}"
    _has_envar "${node_addr}"
    _has_envar "${node_username}"
    _has_envar "${node_password}"
    _has_envar "${node_control}"
    _has_optional_envar "${node_hostname}"
    if [[ "${!node_addr}" == "${BOOTSTRAP_KUBE_VIP_ADDR}" && "${BOOTSTRAP_KUBE_VIP_ENABLED}" == "true" ]]; then
      _log "ERROR" "The kube-vip IP '${BOOTSTRAP_KUBE_VIP_ADDR}' should not be the same as the IP for node '${!node_addr}'"
      exit 1
    fi
    if [[ "${!node_addr}" == "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" ]]; then
      _log "ERROR" "The k8s-gateway load balancer IP '${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}' should not be the same as the IP for node '${!node_addr}'"
      exit 1
    fi
    if [[ "${!node_addr}" == "${BOOTSTRAP_METALLB_INGRESS_ADDR}" ]]; then
      _log "ERROR" "The ingress load balancer IP '${BOOTSTRAP_METALLB_INGRESS_ADDR}' should not be the same as the IP for node '${!node_addr}'"
      exit 1
    fi
    # BatchMode forbids password prompts, so this only succeeds with keys.
    if ssh -q -o BatchMode=yes -o ConnectTimeout=5 "${!node_username}"@"${!var}" "true"; then
      _log "INFO" "SSH into host '${!var}' with username '${!node_username}' was successful"
    else
      _log "ERROR" "SSH into host '${!var}' with username '${!node_username}' was NOT successful, did you copy over your SSH key?"
      exit 1
    fi
  done
}
# Report that all --verify checks passed, then terminate successfully.
verify_success() {
  local -a messages=(
    "All checks passed!"
    "Run the script without --verify to template all the files out"
  )
  local line
  for line in "${messages[@]}"; do
    _log "INFO" "${line}"
  done
  exit 0
}
# Write one SOPS-encrypted host_vars file per node containing the SSH user
# and sudo password Ansible needs for that host. The file name is the node's
# resolved hostname (explicit BOOTSTRAP_ANSIBLE_HOSTNAME_<n>, or the
# control/worker prefix plus the node index).
generate_ansible_host_secrets() {
  local node_id=
  local node_username=
  local node_password=
  local node_control=
  local node_hostname=
  # Fall back to "k8s-" prefixes when no explicit prefix is configured.
  default_control_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX:-k8s-}
  default_worker_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX:-k8s-}
  for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do
    # Node index is the 5th "_"-separated field of the variable name.
    node_id=$(echo "${var}" | awk -F"_" '{print $5}')
    # Fix: node_control was never assigned in this function, so the indirect
    # "${!node_control}" below failed as a bad substitution and every node
    # fell through to the worker branch (compare generate_ansible_hosts).
    node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}"
    if [[ "${!node_control}" == "true" ]]; then
      node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}"
      host_key="${!node_hostname:-${default_control_node_prefix}}"
      if [ "${host_key}" == "${default_control_node_prefix}" ]; then
        node_hostname=${default_control_node_prefix}${node_id}
      else
        node_hostname=${!node_hostname}
      fi
    else
      node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}"
      host_key="${!node_hostname:-${default_worker_node_prefix}}"
      if [ "${host_key}" == "${default_worker_node_prefix}" ]; then
        node_hostname=${default_worker_node_prefix}${node_id}
      else
        node_hostname=${!node_hostname}
      fi
    fi
    {
      node_username="BOOTSTRAP_ANSIBLE_SSH_USERNAME_${node_id}"
      node_password="BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_${node_id}"
      printf "kind: Secret\n"
      printf "ansible_user: %s\n" "${!node_username}"
      printf "ansible_become_pass: %s\n" "${!node_password}"
    } > "${PROJECT_DIR}/provision/ansible/inventory/host_vars/${node_hostname}.sops.yml"
    sops --encrypt --in-place "${PROJECT_DIR}/provision/ansible/inventory/host_vars/${node_hostname}.sops.yml"
  done
}
# Render provision/ansible/inventory/hosts.yml from the BOOTSTRAP_ANSIBLE_*
# variables: control-plane nodes go under the 'master' group, all others
# under 'worker' (the worker section is omitted when there are no workers).
# NOTE(review): the printf format strings carry the literal YAML indentation;
# it appears collapsed in this copy — confirm the nesting against upstream.
generate_ansible_hosts() {
  local worker_node_count=
  # Fall back to "k8s-" prefixes when no explicit prefix is configured.
  default_control_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX:-k8s-}
  default_worker_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX:-k8s-}
  {
    printf -- "---\n"
    printf "kubernetes:\n"
    printf " children:\n"
    printf " master:\n"
    printf " hosts:\n"
    master_node_count=0
    worker_node_count=0
    # First pass: emit master hosts and count the workers for the second pass.
    for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do
      # Node index is the 5th "_"-separated field of the variable name.
      node_id=$(echo "${var}" | awk -F"_" '{print $5}')
      node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}"
      if [[ "${!node_control}" == "true" ]]; then
        master_node_count=$((master_node_count+1))
        node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}"
        host_key="${!node_hostname:-${default_control_node_prefix}}"
        if [ "${host_key}" == "${default_control_node_prefix}" ]; then
          node_hostname=${default_control_node_prefix}${node_id}
        else
          node_hostname=${!node_hostname}
        fi
        printf " %s:\n" "${node_hostname}"
        printf " ansible_host: %s\n" "${!var}"
      else
        worker_node_count=$((worker_node_count+1))
      fi
    done
    # Second pass: emit worker hosts only if any exist.
    if [[ ${worker_node_count} -gt 0 ]]; then
      printf " worker:\n"
      printf " hosts:\n"
      for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do
        node_id=$(echo "${var}" | awk -F"_" '{print $5}')
        node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}"
        if [[ "${!node_control}" == "false" ]]; then
          node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}"
          host_key="${!node_hostname:-${default_worker_node_prefix}}"
          if [ "${host_key}" == "${default_worker_node_prefix}" ]; then
            node_hostname=${default_worker_node_prefix}${node_id}
          else
            node_hostname=${!node_hostname}
          fi
          printf " %s:\n" "${node_hostname}"
          printf " ansible_host: %s\n" "${!var}"
        fi
      done
    fi
  } > "${PROJECT_DIR}/provision/ansible/inventory/hosts.yml"
}
# Report that templating finished, then terminate successfully.
success() {
  local done_msg="All files have been templated, proceed to the next steps outlined in the README"
  _log "INFO" "${done_msg}"
  exit 0
}
# Print a timestamped log line, e.g. "[<date -u>] [INFO] message".
#   $1 - severity label (INFO/WARN/ERROR)
#   $2 - message text
_log() {
  local level="${1}"
  local message="${2}"
  printf "[%s] [%s] %s\n" "$(date -u)" "${level}" "${message}"
}

# Script entry point.
main "$@"

@ -24,14 +24,5 @@ k3s_etcd_datastore: true
k3s_use_unsupported_config: true
# (string) Control Plane registration address
k3s_registration_address: "{{ kubevip_address }}"
# (list) A list of URLs to deploy on the primary control plane. Read notes below.
k3s_server_manifests_urls:
- url: https://docs.projectcalico.org/archive/v3.22/manifests/tigera-operator.yaml
filename: tigera-operator.yaml
# (list) A flat list of templates to deploy on the primary control plane
# /var/lib/rancher/k3s/server/manifests
k3s_server_manifests_templates:
- "calico-installation.yaml.j2"
#k3s_registration_address: "10.0.210.10"
k3s_registration_address: "[2001:67c:1104:ad::10]"

@ -5,31 +5,32 @@
# (bool) Specify if a host (or host group) are part of the control plane
k3s_control_node: true
k3s_disable_kube_proxy: true
# (dict) k3s settings for all control-plane nodes
k3s_server:
node-ip: "{{ ansible_host }}"
node-ip: "{{ ansible_default_ipv6.address }}"
#node-ip: "{{ ansible_default_ipv6.address }},{{ ansible_host }}"
# node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}"
tls-san:
# kube-vip
- "{{ kubevip_address }}"
# - "{{ k3s_registration_address }}"
- "{{ ansible_default_ipv6.address }}"
# Disable Docker - this will use the default containerd CRI
docker: false
flannel-backend: "none" # This needs to be in quotes
flannel-backend: "host-gw" # This needs to be in quotes
disable:
# Disable flannel - replaced with Calico
- flannel
# Disable traefik - replaced with ingress-nginx
- traefik
# Disable servicelb - replaced with metallb and install with Flux
- servicelb
# Disable metrics-server - installed with Flux
- metrics-server
disable-network-policy: true
disable-cloud-controller: true
write-kubeconfig-mode: "644"
# Network CIDR to use for pod IPs
cluster-cidr: "10.42.0.0/16"
# cluster-cidr: "10.45.0.0/16,fd88:3377:4473::/48"
cluster-cidr: "2001:67c:1104:c000::/56"
# Network CIDR to use for service IPs
service-cidr: "10.43.0.0/16"
# service-cidr: "10.46.0.0/16,2001:67c:1104:ad::e:0/112"
service-cidr: "2001:67c:1104:ad::e:0/112"
kubelet-arg:
# Allow k8s services to contain TCP and UDP on the same port
- "feature-gates=MixedProtocolLBService=true"
@ -38,11 +39,6 @@ k3s_server:
- "feature-gates=MixedProtocolLBService=true"
# Required to monitor kube-controller-manager with kube-prometheus-stack
- "bind-address=0.0.0.0"
kube-proxy-arg:
# Allow k8s services to contain TCP and UDP on the same port
- "feature-gates=MixedProtocolLBService=true"
# Required to monitor kube-proxy with kube-prometheus-stack
- "metrics-bind-address=0.0.0.0"
kube-scheduler-arg:
# Allow k8s services to contain TCP and UDP on the same port
- "feature-gates=MixedProtocolLBService=true"
@ -53,5 +49,3 @@ k3s_server:
kube-apiserver-arg:
# Allow k8s services to contain TCP and UDP on the same port
- "feature-gates=MixedProtocolLBService=true"
# Required for HAProxy health-checks
- "anonymous-auth=true"

@ -7,7 +7,8 @@ k3s_control_node: false
# (dict) k3s settings for all worker nodes
k3s_agent:
node-ip: "{{ ansible_host }}"
#node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}"
node-ip: "{{ ansible_default_ipv6.address }}"
kubelet-arg:
# Allow k8s services to contain TCP and UDP on the same port
- "feature-gates=MixedProtocolLBService=true"

@ -14,10 +14,10 @@ kubernetes:
ansible_host: 10.0.210.11
k3s_become: true
k8s-worker2:
ansible_host: 10.0.210.12
ansible_host: 10.0.210.13
k3s_become: true
k8s-worker3:
ansible_host: 10.0.210.13
ansible_host: 10.0.210.12
k3s_become: true
vars:
ansible_user: sm

@ -9,14 +9,14 @@
- name: nuke
prompt: |-
Are you sure you want to nuke this cluster?
Type YES I WANT TO DESTROY THIS CLUSTER to proceed
Type YES to proceed
default: "n"
private: false
pre_tasks:
- name: Check for confirmation
ansible.builtin.fail:
msg: Aborted nuking the cluster
when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER'
when: nuke != 'YES'
- name: Pausing for 5 seconds...
ansible.builtin.pause:

@ -1,101 +0,0 @@
# Terraform configuration for the Cloudflare zone: provider pinning,
# zone-wide settings, and DNS records pointing at this site's public IPv4.
terraform {
  required_providers {
    cloudflare = {
      source = "cloudflare/cloudflare"
      version = "3.22.0"
    }
    http = {
      source = "hashicorp/http"
      version = "3.0.1"
    }
    sops = {
      source = "carlpett/sops"
      version = "0.7.1"
    }
  }
}

# Cloudflare credentials, decrypted from the SOPS-encrypted secrets file.
data "sops_file" "cloudflare_secrets" {
  source_file = "secret.sops.yaml"
}

provider "cloudflare" {
  email = data.sops_file.cloudflare_secrets.data["cloudflare_email"]
  api_key = data.sops_file.cloudflare_secrets.data["cloudflare_apikey"]
}

# Look up the zone for the configured domain.
data "cloudflare_zones" "domain" {
  filter {
    name = data.sops_file.cloudflare_secrets.data["cloudflare_domain"]
  }
}

# Zone-wide settings; the comments group them by Cloudflare dashboard page.
resource "cloudflare_zone_settings_override" "cloudflare_settings" {
  zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id")
  settings {
    # /ssl-tls
    ssl = "strict"
    # /ssl-tls/edge-certificates
    always_use_https = "on"
    min_tls_version = "1.2"
    opportunistic_encryption = "on"
    tls_1_3 = "zrt"
    automatic_https_rewrites = "on"
    universal_ssl = "on"
    # /firewall/settings
    browser_check = "on"
    challenge_ttl = 1800
    privacy_pass = "on"
    security_level = "medium"
    # /speed/optimization
    brotli = "on"
    minify {
      css = "on"
      js = "on"
      html = "on"
    }
    rocket_loader = "on"
    # /caching/configuration
    always_online = "off"
    development_mode = "off"
    # /network
    http3 = "on"
    zero_rtt = "on"
    ipv6 = "on"
    websockets = "on"
    opportunistic_onion = "on"
    pseudo_ipv4 = "off"
    ip_geolocation = "on"
    # /content-protection
    email_obfuscation = "on"
    server_side_exclude = "on"
    hotlink_protection = "off"
    # /workers
    security_header {
      enabled = false
    }
  }
}

# Current public IPv4 address of wherever terraform runs.
# NOTE(review): fetched over plain http — confirm whether the https endpoint
# should be used instead.
data "http" "ipv4" {
  url = "http://ipv4.icanhazip.com"
}

# "ipv4.<domain>" A record tracking the public IPv4 (proxied; ttl 1 = auto).
resource "cloudflare_record" "ipv4" {
  name = "ipv4"
  zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id")
  value = chomp(data.http.ipv4.response_body)
  proxied = true
  type = "A"
  ttl = 1
}

# Apex record: CNAME to the ipv4 record above.
resource "cloudflare_record" "root" {
  name = data.sops_file.cloudflare_secrets.data["cloudflare_domain"]
  zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id")
  value = "ipv4.${data.sops_file.cloudflare_secrets.data["cloudflare_domain"]}"
  proxied = true
  type = "CNAME"
  ttl = 1
}

@ -1,11 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-settings
namespace: flux-system
data:
TIMEZONE: ${BOOTSTRAP_TIMEZONE}
METALLB_LB_RANGE: ${BOOTSTRAP_METALLB_LB_RANGE}
METALLB_K8S_GATEWAY_ADDR: ${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}
METALLB_INGRESS_ADDR: ${BOOTSTRAP_METALLB_INGRESS_ADDR}
Loading…
Cancel
Save