Initial commit

commit b7312092c9 (pull/2/head) by Skyler, 3 months ago
Changed files (lines added):
.config.sample.env (+97)
.envrc (+5)
.gitattributes (+1)
.github/CODEOWNERS (+2)
.github/ISSUE_TEMPLATE/bug-report.md (+12)
.github/ISSUE_TEMPLATE/config.yml (+6)
.github/ISSUE_TEMPLATE/feature-request.md (+12)
.github/ISSUE_TEMPLATE/question.md (+12)
.github/PULL_REQUEST_TEMPLATE.md (+20)
.github/linters/.ansible-lint (+3)
.github/linters/.markdownlint.yaml (+23)
.github/linters/.prettierignore (+2)
.github/linters/.prettierrc.yaml (+5)
.github/linters/.tflint.hcl (+0)
.github/linters/.yamllint.yaml (+18)
.github/renovate.json5 (+104)
.github/workflows/lint.yaml (+61)
.gitignore (+19)
.pre-commit-config.yaml (+37)
.secretlintignore (+2)
.sourceignore (+17)
.taskfiles/AnsibleTasks.yml (+68)
.taskfiles/ClusterTasks.yml (+82)
.taskfiles/PrecommitTasks.yml (+19)
.taskfiles/SopsTasks.yml (+14)
.taskfiles/TerraformTasks.yml (+22)
.vscode/extensions.json (+13)
.vscode/settings.json (+18)
LICENSE (+21)
README.md (+541)
Taskfile.yml (+63)
ansible.cfg (+35)
cluster/apps/default/echo-server/helm-release.yaml (+75)
cluster/apps/default/echo-server/kustomization.yaml (+5)
cluster/apps/default/hajimari/config-pvc.yaml (+13)
cluster/apps/default/hajimari/helm-release.yaml (+134)
cluster/apps/default/hajimari/kustomization.yaml (+6)
cluster/apps/default/kustomization.yaml (+7)
cluster/apps/default/namespace.yaml (+7)
cluster/apps/flux-system/kustomization.yaml (+5)
cluster/apps/flux-system/namespace.yaml (+7)
cluster/apps/kube-system/cert-manager/helm-release.yaml (+37)
cluster/apps/kube-system/cert-manager/kustomization.yaml (+4)
cluster/apps/kube-system/kube-vip/kustomization.yaml (+9)
cluster/apps/kube-system/kube-vip/rbac.yaml (+44)
cluster/apps/kube-system/kured/helm-release.yaml (+32)
cluster/apps/kube-system/kured/kustomization.yaml (+5)
cluster/apps/kube-system/kustomization.yaml (+9)
cluster/apps/kube-system/metrics-server/helm-release.yaml (+30)
cluster/apps/kube-system/metrics-server/kustomization.yaml (+5)
cluster/apps/kube-system/namespace.yaml (+7)
cluster/apps/kube-system/reloader/helm-release.yaml (+26)
cluster/apps/kube-system/reloader/kustomization.yaml (+5)
cluster/apps/kustomization.yaml (+9)
cluster/apps/networking/cloudflare-ddns/cloudflare-ddns.sh (+39)
cluster/apps/networking/cloudflare-ddns/cron-job.yaml (+42)
cluster/apps/networking/cloudflare-ddns/kustomization.yaml (+15)
cluster/apps/networking/external-dns/helm-release.yaml (+55)
cluster/apps/networking/external-dns/kustomization.yaml (+6)
cluster/apps/networking/ingress-nginx/cloudflare-proxied-networks.txt (+1)
cluster/apps/networking/ingress-nginx/helm-release.yaml (+84)
cluster/apps/networking/ingress-nginx/kustomization.yaml (+12)
cluster/apps/networking/k8s-gateway/helm-release.yaml (+37)
cluster/apps/networking/k8s-gateway/kustomization.yaml (+5)
cluster/apps/networking/kustomization.yaml (+10)
cluster/apps/networking/metallb/helm-release.yaml (+30)
cluster/apps/networking/metallb/kustomization.yaml (+5)
cluster/apps/networking/namespace.yaml (+7)
cluster/apps/system-upgrade/kustomization.yaml (+6)
cluster/apps/system-upgrade/namespace.yaml (+7)
cluster/apps/system-upgrade/system-upgrade-controller/kustomization.yaml (+21)
cluster/apps/system-upgrade/system-upgrade-controller/plans/agent.yaml (+26)
cluster/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml (+6)
cluster/apps/system-upgrade/system-upgrade-controller/plans/server.yaml (+23)
cluster/bootstrap/kustomization.yaml (+5)
cluster/charts/bitnami.yaml (+9)
cluster/charts/bjw-s.yaml (+9)
cluster/charts/external-dns.yaml (+9)
cluster/charts/hajimari.yaml (+9)
cluster/charts/ingress-nginx.yaml (+9)
cluster/charts/jetstack.yaml (+9)
cluster/charts/k8s-gateway.yaml (+9)
cluster/charts/kustomization.yaml (+14)
cluster/charts/metallb.yaml (+9)
cluster/charts/metrics-server.yaml (+9)
cluster/charts/stakater.yaml (+9)
cluster/charts/weaveworks-kured.yaml (+9)
cluster/config/kustomization.yaml (+5)
cluster/core/cluster-issuers/kustomization.yaml (+6)
cluster/core/cluster-issuers/letsencrypt-production.yaml (+21)
cluster/core/cluster-issuers/letsencrypt-staging.yaml (+21)
cluster/core/kustomization.yaml (+5)
cluster/crds/cert-manager/kustomization.yaml (+6)
cluster/crds/kustomization.yaml (+6)
cluster/crds/system-upgrade-controller/kustomization.yaml (+6)
cluster/flux/apps.yaml (+25)
cluster/flux/charts.yaml (+13)
cluster/flux/config.yaml (+17)
cluster/flux/core.yaml (+27)
cluster/flux/crds.yaml (+13)
Some files were not shown because too many files have changed in this diff.

@ -0,0 +1,97 @@
#
# Cluster related variables
#
# The repo you created from this template
# e.g. https://github.com/onedr0p/home-cluster
export BOOTSTRAP_GIT_REPOSITORY=""
# Optional: Enable GitHub to send a webhook to update Flux, set the following to one of:
# ignored - this feature will be disabled
# generated - this will generate a token and print it in the logs
# Set this to any other string and it will be used for the secret
export BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET="ignored"
# Choose one of your cloudflare domains
# e.g. onedr0p.com
export BOOTSTRAP_CLOUDFLARE_DOMAIN=""
# The email you use to sign into Cloudflare with
export BOOTSTRAP_CLOUDFLARE_EMAIL=""
# Your global Cloudflare API Key
export BOOTSTRAP_CLOUDFLARE_APIKEY=""
# Pick a range of unused IPs that are on the same network as your nodes
# You don't need many IPs, just choose 10 IPs to start with
# e.g. 192.168.1.220-192.168.1.230
export BOOTSTRAP_METALLB_LB_RANGE=""
# The load balancer IP for k8s_gateway, choose from one of the available IPs above
# e.g. 192.168.1.220
export BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR=""
# The load balancer IP for the ingress controller, choose from one of the available IPs above
# that doesn't conflict with any other IP addresses here
# e.g. 192.168.1.221
export BOOTSTRAP_METALLB_INGRESS_ADDR=""
# Age Public Key - string should start with age
# e.g. age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta
export BOOTSTRAP_AGE_PUBLIC_KEY=""
# Enable / Disable kube-vip
# If this is set to false (e.g. on a cluster with a single-node control plane), kube-vip will not be installed
# and BOOTSTRAP_KUBE_VIP_ADDR needs to be set to the IP the API server is reachable under
export BOOTSTRAP_KUBE_VIP_ENABLED="true"
# The IP Address to use with kube-vip
# Pick an unused IP that is on the same network as your nodes
# and outside the ${BOOTSTRAP_METALLB_LB_RANGE} range
# and doesn't conflict with any other IP addresses here
# e.g. 192.168.1.254
export BOOTSTRAP_KUBE_VIP_ADDR=""
# Choose your timezone
# Used to set your systems timezone
# and Kured timezone
# e.g. America/New_York
export BOOTSTRAP_TIMEZONE="Etc/UTC"
#
# Ansible related variables
#
#
# Default prefixes for hostnames assigned by Ansible
# These are unused on nodes where BOOTSTRAP_ANSIBLE_HOSTNAME_ is provided
#
export BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX="k8s-"
export BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX="k8s-"
#
# Ansible hosts - repeat this block as many times as you need,
# incrementing the last digit on the variable name for each node
#
# Host IP Address to the control plane node
# That doesn't conflict with any other IP addresses here
# e.g. 192.168.1.200
export BOOTSTRAP_ANSIBLE_HOST_ADDR_0=""
# The user Ansible will use to log into the nodes
export BOOTSTRAP_ANSIBLE_SSH_USERNAME_0=""
# Password Ansible will use to escalate to sudo
export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_0=""
# Set this node as a control node (true/false)
export BOOTSTRAP_ANSIBLE_CONTROL_NODE_0=""
# Optional: Set the hostname of the node, if set this will override the *_HOSTNAME_PREFIX vars above
export BOOTSTRAP_ANSIBLE_HOSTNAME_0=""
# export BOOTSTRAP_ANSIBLE_HOST_ADDR_1=""
# export BOOTSTRAP_ANSIBLE_SSH_USERNAME_1=""
# export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_1=""
# export BOOTSTRAP_ANSIBLE_CONTROL_NODE_1=""
# export BOOTSTRAP_ANSIBLE_HOSTNAME_1=""
# export BOOTSTRAP_ANSIBLE_HOST_ADDR_2=""
# export BOOTSTRAP_ANSIBLE_SSH_USERNAME_2=""
# export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_2=""
# export BOOTSTRAP_ANSIBLE_CONTROL_NODE_2=""
# export BOOTSTRAP_ANSIBLE_HOSTNAME_2=""

@ -0,0 +1,5 @@
#shellcheck disable=SC2148,SC2155
export KUBECONFIG=$(expand_path ./provision/kubeconfig)
export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg)
export ANSIBLE_HOST_KEY_CHECKING="False"
export SOPS_AGE_KEY_FILE=$(expand_path ~/.config/sops/age/keys.txt)
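Note: this `.envrc` is read by direnv (listed as a recommended tool in the README below); `expand_path` comes from direnv's stdlib. A minimal usage sketch, assuming direnv is hooked into your shell:
```sh
# Approve the repository's .envrc once, from the root of your checkout
direnv allow .
# The exported variables (KUBECONFIG, ANSIBLE_CONFIG, ...) are now set whenever you cd into the repo
echo "$KUBECONFIG"
```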

.gitattributes (vendored, +1)

@ -0,0 +1 @@
*.sops.* diff=sopsdiffer
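Note: the `sopsdiffer` driver referenced above only takes effect if it is defined in your Git config; a sketch of the usual pairing, based on the SOPS documentation (this command is not part of this commit):
```sh
# Teach git diff to decrypt *.sops.* files via the "sopsdiffer" textconv driver
git config --global diff.sopsdiffer.textconv "sops -d"
```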

@ -0,0 +1,2 @@
# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
* @onedr0p

@ -0,0 +1,12 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: kind/bug
assignees: ''
---
# Details
<!-- Note: This should be a clear and concise description of what the bug is. -->

@ -0,0 +1,6 @@
---
blank_issues_enabled: false
contact_links:
- name: Discuss on Discord
url: https://discord.gg/k8s-at-home
about: Join our Discord community

@ -0,0 +1,12 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: kind/enhancement
assignees: ''
---
# Details
<!-- Note: A clear and concise description of what you want to happen. -->

@ -0,0 +1,12 @@
---
name: Question
about: Ask a question to the maintainer
title: ''
labels: kind/question
assignees: ''
---
# Details
<!-- Note: A clear and concise query of what you want to ask. -->

@ -0,0 +1,20 @@
**Description of the change**
<!-- Describe the scope of your change - i.e. what the change does. -->
**Benefits**
<!-- What benefits will be realized by the code change? -->
**Possible drawbacks**
<!-- Describe any known limitations with your change -->
**Applicable issues**
<!-- Enter any applicable Issues here (You can reference an issue using #) -->
- fixes #
**Additional information**
<!-- If there's anything else that's important and relevant to your pull request, mention that information here.-->

@ -0,0 +1,3 @@
# .ansible-lint
warn_list:
- unnamed-task

@ -0,0 +1,23 @@
---
default: true
# MD013/line-length - Line length
MD013:
# Number of characters
line_length: 240
# Number of characters for headings
heading_line_length: 80
# Number of characters for code blocks
code_block_line_length: 80
# Include code blocks
code_blocks: true
# Include tables
tables: true
# Include headings
headings: true
# Include headers (deprecated alias for headings)
headers: true
# Strict length checking
strict: false
# Stern length checking
stern: false

@ -0,0 +1,2 @@
*.sops.*
gotk-components.yaml

@ -0,0 +1,5 @@
---
trailingComma: "es5"
tabWidth: 2
semi: false
singleQuote: false

@ -0,0 +1,18 @@
---
ignore: |
*.sops.*
gotk-components.yaml
extends: default
rules:
truthy:
allowed-values: ["true", "false", "on"]
comments:
min-spaces-from-content: 1
line-length: disable
braces:
min-spaces-inside: 0
max-spaces-inside: 1
brackets:
min-spaces-inside: 0
max-spaces-inside: 0
indentation: enable

@ -0,0 +1,104 @@
{
"enabled": true,
"semanticCommits": "enabled",
"dependencyDashboard": true,
"dependencyDashboardTitle": "Renovate Dashboard ๐Ÿค–",
"suppressNotifications": ["prIgnoreNotification"],
"rebaseWhen": "conflicted",
"schedule": ["every sunday"],
"pre-commit": {
"enabled": true
},
"flux": {
"fileMatch": ["cluster/.+\\.ya?ml$"]
},
"helm-values": {
"fileMatch": ["cluster/.+\\.ya?ml$"]
},
"kubernetes": {
"fileMatch": [
"cluster/.+\\.ya?ml$",
"provision/ansible/.+\\.ya?ml.j2$"
]
},
"regexManagers": [
{
"description": "Process CRD dependencies",
"fileMatch": ["cluster/crds/.+\\.ya?ml$"],
"matchStrings": [
// GitRepository and Flux Kustomization where 'Git release/tag' matches 'Helm' version
"registryUrl=(?<registryUrl>\\S+) chart=(?<depName>\\S+)\n.*?(?<currentValue>[^-\\s]*)\n",
// Kustomization where 'GitHub release artifact URL' matches 'Docker image' version
"datasource=(?<datasource>\\S+) image=(?<depName>\\S+)\n.*?-\\s(.*?)\/(?<currentValue>[^/]+)\/[^/]+\n"
],
"datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}helm{{/if}}"
},
{
"description": "Process various other dependencies",
"fileMatch": [
"provision/ansible/.+\\.ya?ml$",
"cluster/.+\\.ya?ml$"
],
"matchStrings": [
"datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( versioning=(?<versioning>\\S+))?\n.*?\"(?<currentValue>.*)\"\n"
],
"datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}",
"versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}"
}
],
"packageRules": [
// setup datasources
{
"matchDatasources": ["helm"],
"separateMinorPatch": true,
"ignoreDeprecated": true
},
// global docker datasource settings
{
"matchDatasources": ["docker"],
"enabled": true,
"commitMessageTopic": "container image {{depName}}",
"commitMessageExtra": "to {{#if isSingleVersion}}v{{{newVersion}}}{{else}}{{{newValue}}}{{/if}}",
"matchUpdateTypes": ["major", "minor", "patch"]
},
{
"description": "Group Cert-Manager image (for CRDs) and chart",
"groupName": "Cert-Manager",
"matchPackagePatterns": ["cert-manager"],
"matchDatasources": ["docker", "helm"],
"group": { "commitMessageTopic": "{{{groupName}}} group" },
"separateMinorPatch": true
},
// add labels according to package and update types
{
"matchDatasources": ["docker"],
"matchUpdateTypes": ["major"],
"labels": ["renovate/image", "dep/major"]
},
{
"matchDatasources": ["docker"],
"matchUpdateTypes": ["minor"],
"labels": ["renovate/image", "dep/minor"]
},
{
"matchDatasources": ["docker"],
"matchUpdateTypes": ["patch"],
"labels": ["renovate/image", "dep/patch"]
},
{
"matchDatasources": ["helm"],
"matchUpdateTypes": ["major"],
"labels": ["renovate/helm", "dep/major"]
},
{
"matchDatasources": ["helm"],
"matchUpdateTypes": ["minor"],
"labels": ["renovate/helm", "dep/minor"]
},
{
"matchDatasources": ["helm"],
"matchUpdateTypes": ["patch"],
"labels": ["renovate/helm", "dep/patch"]
}
]
}

@ -0,0 +1,61 @@
---
name: Lint
on: # yamllint disable-line rule:truthy
workflow_dispatch:
pull_request:
branches:
- main
concurrency:
group: ${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
env:
# Currently no way to detect automatically
DEFAULT_BRANCH: main
jobs:
build:
name: MegaLinter
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: MegaLinter
uses: oxsecurity/megalinter@v6.6.0
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
VALIDATE_ALL_CODEBASE: ${{ github.event_name == 'workflow_dispatch' }}
ENABLE_LINTERS: |-
${{
join(
fromJSON('
[
"ACTION_ACTIONLINT",
"ANSIBLE_ANSIBLE_LINT",
"COPYPASTE_JSCPD",
"KUBERNETES_KUBEVAL",
"MARKDOWN_MARKDOWNLINT",
"REPOSITORY_GIT_DIFF",
"REPOSITORY_SECRETLINT",
"TERRAFORM_TERRAFORM_FMT",
"YAML_PRETTIER",
"YAML_YAMLLINT"
]
'),
','
)
}}
ANSIBLE_DIRECTORY: provision/ansible
ANSIBLE_ANSIBLE_LINT_CONFIG_FILE: .github/linters/.ansible-lint
KUBERNETES_DIRECTORY: cluster
KUBERNETES_KUBEVAL_ARGUMENTS: --ignore-missing-schemas
MARKDOWN_MARKDOWNLINT_CONFIG_FILE: .github/linters/.markdownlint.yaml
MARKDOWN_MARKDOWNLINT_RULES_PATH: .github/
YAML_YAMLLINT_CONFIG_FILE: .github/linters/.yamllint.yaml
YAML_PRETTIER_CONFIG_FILE: .github/linters/.prettierrc.yaml
YAML_PRETTIER_FILTER_REGEX_EXCLUDE: "(.*\\.sops\\.ya?ml)"

.gitignore (vendored, +19)

@ -0,0 +1,19 @@
# Trash
.DS_Store
Thumbs.db
# k8s
kubeconfig
# vscode-sops
.decrypted~*.yaml
.config.env
*.agekey
*.pub
*.key
github-deploy-key*
# Ansible
xanmanning.k3s*
# Terraform
.terraform
.terraform.tfstate*
terraform.tfstate*
megalinter-reports

@ -0,0 +1,37 @@
---
fail_fast: false
repos:
- repo: https://github.com/adrienverge/yamllint
rev: v1.27.1
hooks:
- args:
- --config-file
- .github/linters/.yamllint.yaml
id: yamllint
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
hooks:
- id: check-merge-conflict
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.3.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 0.4.0
hooks:
- id: fix-smartquotes
- repo: https://github.com/k8s-at-home/sops-pre-commit
rev: v2.1.1
hooks:
- id: forbid-secrets
- repo: https://github.com/gruntwork-io/pre-commit
rev: v0.1.17
hooks:
- id: terraform-fmt
- id: terraform-validate
- id: tflint

@ -0,0 +1,2 @@
megalinter-reports
README.md

@ -0,0 +1,17 @@
# See https://toolkit.fluxcd.io/components/source/gitrepositories/#excluding-files
.github/
.taskfiles/
.vscode/
provision/
tmpl/
.config.sample.env
.envrc
.gitattributes
.gitignore
.pre-commit-config.yaml
.sops.yaml
ansible.cfg
configure
LICENSE
README.md
Taskfile.yml

@ -0,0 +1,68 @@
---
version: "3"
env:
ANSIBLE_CONFIG: "{{.PROJECT_DIR}}/ansible.cfg"
vars:
ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks"
ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory"
tasks:
init:
desc: Install / Upgrade Ansible galaxy deps
dir: provision/ansible
cmds:
- ansible-galaxy install -r requirements.yml --roles-path ~/.ansible/roles --force
- ansible-galaxy collection install -r requirements.yml --collections-path ~/.ansible/collections --force
list:
desc: List all the hosts
dir: provision/ansible
cmds:
- ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --list-hosts
prepare:
desc: Prepare all the k8s nodes for running k3s
dir: provision/ansible
cmds:
- ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-prepare.yml
install:
desc: Install Kubernetes on the nodes
dir: provision/ansible
cmds:
- ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-installation.yml
nuke:
desc: Uninstall Kubernetes on the nodes
dir: provision/ansible
interactive: true
cmds:
- ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-nuke.yml
- task: reboot
ping:
desc: Ping all the hosts
dir: provision/ansible
cmds:
- ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --one-line -m 'ping'
uptime:
desc: Uptime of all the hosts
dir: provision/ansible
cmds:
- ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --one-line -a 'uptime'
reboot:
desc: Reboot all the k8s nodes
dir: provision/ansible
cmds:
- ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-reboot.yml
poweroff:
desc: Shutdown all the k8s nodes
dir: provision/ansible
cmds:
- ansible kubernetes -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml -a '/usr/bin/systemctl poweroff' --become

@ -0,0 +1,82 @@
---
version: "3"
tasks:
verify:
desc: Verify flux meets the prerequisites
cmds:
- flux check --pre
install:
desc: Install Flux into your cluster
cmds:
- kubectl apply --kustomize {{.CLUSTER_DIR}}/bootstrap/
- cat {{.SOPS_AGE_KEY_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin
- kubectl apply --kustomize {{.CLUSTER_DIR}}/flux/flux-system/
- task: reconcile
preconditions:
- sh: test -f {{.SOPS_AGE_KEY_FILE}}
msg: |
Age key file is not found. Did you forget to create it?
vars:
SOPS_AGE_KEY_FILE: ~/.config/sops/age/keys.txt
reconcile:
desc: Force update Flux to pull in changes from your Git repository
cmds:
- flux reconcile -n flux-system source git flux-cluster
- flux reconcile -n flux-system kustomization flux-cluster
nodes:
desc: List all the nodes in your cluster
cmds:
- kubectl get nodes {{.CLI_ARGS | default "-o wide"}}
pods:
desc: List all the pods in your cluster
cmds:
- kubectl get pods {{.CLI_ARGS | default "-A"}}
kustomizations:
desc: List all the kustomizations in your cluster
cmds:
- kubectl get kustomizations {{.CLI_ARGS | default "-A"}}
helmreleases:
desc: List all the helmreleases in your cluster
cmds:
- kubectl get helmreleases {{.CLI_ARGS | default "-A"}}
helmrepositories:
desc: List all the helmrepositories in your cluster
cmds:
- kubectl get helmrepositories {{.CLI_ARGS | default "-A"}}
gitrepositories:
desc: List all the gitrepositories in your cluster
cmds:
- kubectl get gitrepositories {{.CLI_ARGS | default "-A"}}
certificates:
desc: List all the certificates in your cluster
cmds:
- kubectl get certificates {{.CLI_ARGS | default "-A"}}
- kubectl get certificaterequests {{.CLI_ARGS | default "-A"}}
ingresses:
desc: List all the ingresses in your cluster
cmds:
- kubectl get ingress {{.CLI_ARGS | default "-A"}}
resources:
desc: Gather common resources in your cluster, useful when asking for support
cmds:
- task: nodes
- task: kustomizations
- task: helmreleases
- task: helmrepositories
- task: gitrepositories
- task: certificates
- task: ingresses
- task: pods

@ -0,0 +1,19 @@
---
version: "3"
tasks:
init:
desc: Initialize pre-commit hooks
cmds:
- pre-commit install --install-hooks
run:
desc: Run pre-commit
cmds:
- pre-commit run --all-files
update:
desc: Update pre-commit hooks
cmds:
- pre-commit autoupdate

@ -0,0 +1,14 @@
---
version: "3"
tasks:
encrypt:
desc: Encrypt a file with sops in place (the file path must follow --), e.g. "task sops:encrypt -- file.yml"
cmds:
- sops --encrypt --in-place {{.CLI_ARGS}}
decrypt:
desc: Decrypt a file with sops in place (the file path must follow --), e.g. "task sops:decrypt -- file.yml"
cmds:
- sops --decrypt --in-place {{.CLI_ARGS}}

@ -0,0 +1,22 @@
---
version: "3"
tasks:
init:
desc: Initialize terraform dependencies
dir: provision/terraform/cloudflare
cmds:
- terraform init {{.CLI_ARGS}}
plan:
desc: Show the changes terraform will make
dir: provision/terraform/cloudflare
cmds:
- terraform plan {{.CLI_ARGS}}
apply:
desc: Apply the changes to Cloudflare
dir: provision/terraform/cloudflare
cmds:
- terraform apply {{.CLI_ARGS}}

@ -0,0 +1,13 @@
{
"recommendations": [
"HashiCorp.terraform",
"britesnow.vscode-toggle-quotes",
"mitchdenny.ecdc",
"ms-kubernetes-tools.vscode-kubernetes-tools",
"oderwat.indent-rainbow",
"redhat.ansible",
"signageos.signageos-vscode-sops",
"usernamehw.errorlens",
"fcrespo82.markdown-table-formatter"
]
}

@ -0,0 +1,18 @@
{
"ansible.ansibleLint.arguments": "-c .github/linters/.ansible-lint",
"files.associations": {
"*.json5": "jsonc",
"**/provision/ansible/**/*.yml": "ansible",
"**/provision/ansible/**/*.sops.yml": "yaml",
"**/provision/ansible/**/inventory/**/*.yml": "yaml",
"**/provision/terraform/**/*.tf": "terraform"
},
"prettier.configPath": ".github/linters/.prettierrc.yaml",
"prettier.ignorePath": ".github/linters/.prettierignore",
"yaml.schemas": {
"Kubernetes": "cluster/*.yaml"
},
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs":"active",
"editor.hover.delay": 1500
}

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 k8s@home
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,541 @@
# Template for deploying k3s backed by Flux
Highly opinionated template for deploying a single [k3s](https://k3s.io) cluster with [Ansible](https://www.ansible.com) and [Terraform](https://www.terraform.io) backed by [Flux](https://toolkit.fluxcd.io/) and [SOPS](https://toolkit.fluxcd.io/guides/mozilla-sops/).
The purpose here is to showcase how you can deploy an entire Kubernetes cluster and show it off to the world using the [GitOps](https://www.weave.works/blog/what-is-gitops-really) tool [Flux](https://toolkit.fluxcd.io/). When completed, your Git repository will be driving the state of your Kubernetes cluster. In addition, with the help of the [Ansible](https://github.com/ansible-collections/community.sops), [Terraform](https://github.com/carlpett/terraform-provider-sops) and [Flux](https://toolkit.fluxcd.io/guides/mozilla-sops/) SOPS integrations, you'll be able to commit [Age](https://github.com/FiloSottile/age)-encrypted secrets to your public repo.
## Overview
- [Introduction](https://github.com/onedr0p/flux-cluster-template#-introduction)
- [Prerequisites](https://github.com/onedr0p/flux-cluster-template#-prerequisites)
- [Repository structure](https://github.com/onedr0p/flux-cluster-template#-repository-structure)
- [Lets go!](https://github.com/onedr0p/flux-cluster-template#-lets-go)
- [Post installation](https://github.com/onedr0p/flux-cluster-template#-post-installation)
- [Troubleshooting](https://github.com/onedr0p/flux-cluster-template#-troubleshooting)
- [What's next](https://github.com/onedr0p/flux-cluster-template#-whats-next)
- [Thanks](https://github.com/onedr0p/flux-cluster-template#-thanks)
## 👋 Introduction
The following components will be installed in your [k3s](https://k3s.io/) cluster by default. Most are only included to get a minimum viable cluster up and running.
- [flux](https://toolkit.fluxcd.io/) - GitOps operator for managing Kubernetes clusters from a Git repository
- [kube-vip](https://kube-vip.io/) - Load balancer for the Kubernetes control plane nodes
- [metallb](https://metallb.universe.tf/) - Load balancer for Kubernetes services
- [cert-manager](https://cert-manager.io/) - Operator to request SSL certificates and store them as Kubernetes resources
- [calico](https://www.tigera.io/project-calico/) - Container networking interface for inter pod and service networking
- [external-dns](https://github.com/kubernetes-sigs/external-dns) - Operator to publish DNS records to Cloudflare (and other providers) based on Kubernetes ingresses
- [k8s_gateway](https://github.com/ori-edge/k8s_gateway) - DNS resolver that provides local DNS to your Kubernetes ingresses
- [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) - Kubernetes ingress controller used as an HTTP reverse proxy for Kubernetes ingresses
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) - Provision persistent local storage with Kubernetes
_Additional applications include [hajimari](https://github.com/toboshii/hajimari), [error-pages](https://github.com/tarampampam/error-pages), [echo-server](https://github.com/Ealenn/Echo-Server), [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller), [reloader](https://github.com/stakater/Reloader), and [kured](https://github.com/weaveworks/kured)_
For provisioning the following tools will be used:
- [Fedora 36 Server](https://getfedora.org/en/server/download/) - Universal operating system that supports running all kinds of home related workloads in Kubernetes and has a faster release cycle
- [Ansible](https://www.ansible.com) - Provision Fedora Server and install k3s
- [Terraform](https://www.terraform.io) - Provision an already existing Cloudflare domain and certain DNS records to be used with your k3s cluster
## 📝 Prerequisites
**Note:** _This template has not been tested on cloud providers like AWS EC2, Hetzner, Scaleway etc... Those cloud offerings probably have a better way of provisioning a Kubernetes cluster and it's advisable to use those instead of the Ansible playbooks included here. This repository can still be tweaked for the GitOps/Flux portion if there's a cluster working in one of those environments._
### 📚 Reading material
- [Organizing Cluster Access Using kubeconfig Files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)
### 💻 Systems
- One or more nodes with a fresh install of [Fedora Server 36](https://getfedora.org/en/server/download/).
- These nodes can be ARM64/AMD64 bare metal or VMs.
- An odd number of control plane nodes, greater than or equal to 3, is required if deploying more than one control plane node (so etcd can maintain quorum).
- A [Cloudflare](https://www.cloudflare.com/) account with a domain; this will be managed by Terraform and external-dns. You can [register new domains](https://www.cloudflare.com/products/registrar/) directly through Cloudflare.
- Some experience in debugging problems and a positive attitude ;)
๐Ÿ“ It is recommended to have 3 master nodes for a highly available control plane.
### ๐Ÿ”ง Workstation Tools
1. Install the **most recent versions** of the following CLI tools on your workstation, if you are using [Homebrew](https://brew.sh/) on MacOS or Linux skip to steps 3 and 4.
* Required: [age](https://github.com/FiloSottile/age), [ansible](https://www.ansible.com), [flux](https://toolkit.fluxcd.io/), [go-task](https://github.com/go-task/task), [ipcalc](http://jodies.de/ipcalc), [jq](https://stedolan.github.io/jq/), [kubectl](https://kubernetes.io/docs/tasks/tools/), [pre-commit](https://github.com/pre-commit/pre-commit), [sops](https://github.com/mozilla/sops), [terraform](https://www.terraform.io), [yq v4](https://github.com/mikefarah/yq)
* Recommended: [direnv](https://github.com/direnv/direnv), [helm](https://helm.sh/), [kustomize](https://github.com/kubernetes-sigs/kustomize), [prettier](https://github.com/prettier/prettier), [stern](https://github.com/stern/stern), [yamllint](https://github.com/adrienverge/yamllint)
2. This guide heavily relies on [go-task](https://github.com/go-task/task) as a framework for setting things up. It is advised to learn and understand the commands it is running under the hood.
3. Install [go-task](https://github.com/go-task/task) via Brew
```sh
brew install go-task/tap/go-task
```
4. Install workstation dependencies via Brew
```sh
task init
```
### ⚠️ pre-commit
It is advisable to install [pre-commit](https://pre-commit.com/) and the pre-commit hooks that come with this repository.
[sops-pre-commit](https://github.com/k8s-at-home/sops-pre-commit) will check to make sure you are not committing non-encrypted Kubernetes secrets to your repository.
1. Enable Pre-Commit
```sh
task precommit:init
```
2. Update the pre-commit hooks. Note that `pre-commit autoupdate` occasionally makes mistakes, so verify its results.
```sh
task precommit:update
```
## 📂 Repository structure
The Git repository contains the following directories under `cluster`, ordered below by how Flux will apply them.
```sh
๐Ÿ“ cluster # k8s cluster defined as code
โ”œโ”€๐Ÿ“ flux # flux, gitops operator, loaded before everything
โ”œโ”€๐Ÿ“ crds # custom resources, loaded before ๐Ÿ“ core and ๐Ÿ“ apps
โ”œโ”€๐Ÿ“ charts # helm repos, loaded before ๐Ÿ“ core and ๐Ÿ“ apps
โ”œโ”€๐Ÿ“ config # cluster config, loaded before ๐Ÿ“ core and ๐Ÿ“ apps
โ”œโ”€๐Ÿ“ core # crucial apps, namespaced dir tree, loaded before ๐Ÿ“ apps
โ””โ”€๐Ÿ“ apps # regular apps, namespaced dir tree, loaded last
```
## 🚀 Lets go
The very first step is to create a new repository by clicking the **Use this template** button on this page.
Clone the repo to your local workstation and `cd` into it.
📝 **All of the below commands** are run on your **local** workstation, **not** on any of your cluster nodes.
### 🔐 Setting up Age
📝 Here we will create an Age private and public key. Using [SOPS](https://github.com/mozilla/sops) with [Age](https://github.com/FiloSottile/age) allows us to encrypt secrets and use them in Ansible, Terraform and Flux.
1. Create an Age private / public key pair
```sh
age-keygen -o age.agekey
```
2. Set up the directory for the Age key and move the Age file to it
```sh
mkdir -p ~/.config/sops/age
mv age.agekey ~/.config/sops/age/keys.txt
```
3. Export the `SOPS_AGE_KEY_FILE` variable in your `bashrc`, `zshrc` or `config.fish` and source it, e.g.
```sh
export SOPS_AGE_KEY_FILE=~/.config/sops/age/keys.txt
source ~/.bashrc
```
4. Fill out the Age public key in `.config.env` under `BOOTSTRAP_AGE_PUBLIC_KEY`. **Note:** the public key should start with `age`...
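If you need the public key again later, it can be recovered from the key file created above; a quick sketch (assuming the path used in step 2):
```sh
# Print the public key (recipient) that corresponds to your private key
age-keygen -y ~/.config/sops/age/keys.txt
```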
### ☁️ Global Cloudflare API Key
In order to use Terraform and `cert-manager` with the Cloudflare DNS challenge you will need to create an API key.
1. Head over to Cloudflare and create an API key by going [here](https://dash.cloudflare.com/profile/api-tokens).
2. Under the `API Keys` section, create a global API Key.
3. Use the API Key in the configuration section below.
๐Ÿ“ You may wish to update this later on to a Cloudflare **API Token** which can be scoped to certain resources. I do not recommend using a Cloudflare **API Key**, however for the purposes of this template it is easier getting started without having to define which scopes and resources are needed. For more information see the [Cloudflare docs on API Keys and Tokens](https://developers.cloudflare.com/api/).
### ๐Ÿ“„ Configuration
๐Ÿ“ The `.config.env` file contains necessary configuration that is needed by Ansible, Terraform and Flux.
1. Copy the `.config.sample.env` to `.config.env` and start filling out all the environment variables.
**All are required** unless otherwise noted in the comments.
```sh
cp .config.sample.env .config.env
```
2. Once that is done, verify the configuration is correct by running:
```sh
task verify
```
3. If you do not encounter any errors, run the following to have the script wire up the templated files and place them where they need to be.
```sh
task configure
```
### ⚡ Preparing Fedora Server with Ansible
📝 Here we will be running an Ansible playbook to prepare Fedora Server for running a Kubernetes cluster.
📝 Nodes are not security-hardened by default; you can do this with [dev-sec/ansible-collection-hardening](https://github.com/dev-sec/ansible-collection-hardening) or similar, if it supports Fedora Server.
1. Ensure you are able to SSH into your nodes from your workstation using a private SSH key **without a passphrase** (a sketch of the typical commands is included at the end of this section). This is how Ansible is able to connect to your remote nodes.
[How to configure SSH key-based authentication](https://www.digitalocean.com/community/tutorials/how-to-configure-ssh-key-based-authentication-on-a-linux-server)
2. Install the Ansible deps
```sh
task ansible:init
```
3. Verify Ansible can view your config
```sh
task ansible:list
```
4. Verify Ansible can ping your nodes
```sh
task ansible:ping
```
5. Run the Fedora Server Ansible prepare playbook
```sh
task ansible:prepare
```
6. Reboot the nodes
```sh
task ansible:reboot
```
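As referenced in step 1, here is a minimal sketch of setting up passphrase-less SSH key authentication to a node; the username is a placeholder and the IP matches the example used for `BOOTSTRAP_ANSIBLE_HOST_ADDR_0`:
```sh
# Generate a key with no passphrase (skip if you already have a suitable key)
ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519
# Install the public key on a node, then confirm passwordless login works
ssh-copy-id fedora@192.168.1.200
ssh fedora@192.168.1.200 hostname
```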
### ⛵ Installing k3s with Ansible
📝 Here we will be running an Ansible playbook to install [k3s](https://k3s.io/) with [this](https://galaxy.ansible.com/xanmanning/k3s) wonderful k3s Ansible galaxy role. After completion, Ansible will drop a `kubeconfig` in `./provision/kubeconfig` for interacting with your cluster via `kubectl` (a short usage sketch is included at the end of this section).
☢️ If you run into problems, you can run `task ansible:nuke` to destroy the k3s cluster and start over.
1. Verify Ansible can view your config
```sh
task ansible:list
```
2. Verify Ansible can ping your nodes
```sh
task ansible:ping
```
3. Install k3s with Ansible
```sh
task ansible:install
```
4. Verify the nodes are online
```sh
task cluster:nodes
# NAME STATUS ROLES AGE VERSION
# k8s-0 Ready control-plane,master 4d20h v1.21.5+k3s1
# k8s-1 Ready worker 4d20h v1.21.5+k3s1
```
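As mentioned above, the kubeconfig is written to `./provision/kubeconfig`; a short sketch of using it directly with `kubectl` (if you use direnv, the `.envrc` already exports `KUBECONFIG` for you):
```sh
export KUBECONFIG="$(pwd)/provision/kubeconfig"
kubectl get nodes -o wide
kubectl get pods --all-namespaces
```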
### ☁️ Configuring Cloudflare DNS with Terraform
📝 Review the Terraform scripts under `./provision/terraform/cloudflare/` and make sure you understand what they're doing (no, really, review them).
If your domain already has existing DNS records **be sure to export those DNS settings before you continue**.
1. Pull in the Terraform deps
```sh
task terraform:init
```
2. Review the changes Terraform will make to your Cloudflare domain
```sh
task terraform:plan
```
3. Have Terraform apply your Cloudflare settings
```sh
task terraform:apply
```
If Terraform ran successfully, you can log into Cloudflare and validate that the DNS records are present.
The cluster application [external-dns](https://github.com/kubernetes-sigs/external-dns) will be managing the rest of the DNS records you will need.
### 🔹 GitOps with Flux
📝 Here we will be installing [flux](https://toolkit.fluxcd.io/) after some quick bootstrap steps.
1. Verify Flux can be installed
```sh
task cluster:verify
# ► checking prerequisites
# ✔ kubectl 1.21.5 >=1.18.0-0
# ✔ Kubernetes 1.21.5+k3s1 >=1.16.0-0
# ✔ prerequisites checks passed
```
2. Push your changes to Git
📝 **Verify** all the `*.sops.yaml` and `*.sops.yml` files under the `./cluster` and `./provision` folders are **encrypted** with SOPS
```sh
git add -A
git commit -m "Initial commit :rocket:"
git push
```
3. Install Flux and sync the cluster to the Git repository
```sh
task cluster:install
# namespace/flux-system configured
# customresourcedefinition.apiextensions.k8s.io/alerts.notification.toolkit.fluxcd.io created
```
4. Verify Flux components are running in the cluster
```sh
task cluster:pods -- -n flux-system
# NAME READY STATUS RESTARTS AGE
# helm-controller-5bbd94c75-89sb4 1/1 Running 0 1h
# kustomize-controller-7b67b6b77d-nqc67 1/1 Running 0 1h
# notification-controller-7c46575844-k4bvr 1/1 Running 0 1h
# source-controller-7d6875bcb4-zqw9f 1/1 Running 0 1h
```
### 🎤 Verification Steps
_Mic check, 1, 2_ - In a few moments applications should be lighting up like a Christmas tree 🎄
You are able to run all the commands below with one task
```sh
task cluster:resources
```
1. View the Flux Git Repositories
```sh
task cluster:gitrepositories
```
2. View the Flux kustomizations
```sh
task cluster:kustomizations
```
3. View all the Flux Helm Releases
```sh
task cluster:helmreleases
```
4. View all the Flux Helm Repositories
```sh
task cluster:helmrepositories
```
5. View all the Pods
```sh
task cluster:pods
```
6. View all the certificates and certificate requests
```sh
task cluster:certificates
```
7. View all the ingresses
```sh
task cluster:ingresses
```
๐Ÿ† **Congratulations** if all goes smooth you'll have a Kubernetes cluster managed by Flux, your Git repository is driving the state of your cluster.
โ˜ข๏ธ If you run into problems, you can run `task ansible:nuke` to destroy the k3s cluster and start over.
๐Ÿง  Now it's time to pause and go get some coffee โ˜• because next is describing how DNS is handled.
## ๐Ÿ“ฃ Post installation
### ๐ŸŒ DNS
๐Ÿ“ The [external-dns](https://github.com/kubernetes-sigs/external-dns) application created in the `networking` namespace will handle creating public DNS records. By default, `echo-server` is the only public domain exposed on your Cloudflare domain. In order to make additional applications public you must set an ingress annotation like in the `HelmRelease` for `echo-server`. You do not need to use Terraform to create additional DNS records unless you need a record outside the purposes of your Kubernetes cluster (e.g. setting up MX records).
[k8s_gateway](https://github.com/ori-edge/k8s_gateway) is deployed on the IP chosen for `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}`. In order to test DNS, you can point your client's DNS to the `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}` IP address and load `https://hajimari.${BOOTSTRAP_CLOUDFLARE_DOMAIN}` in your browser.
You can also try debugging with the command `dig`, e.g. `dig @${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR} hajimari.${BOOTSTRAP_CLOUDFLARE_DOMAIN}` and you should get a valid answer containing your `${BOOTSTRAP_METALLB_INGRESS_ADDR}` IP address.
If your router (or Pi-hole, AdGuard Home or whatever) supports conditional DNS forwarding (also known as split-horizon DNS), you may have DNS requests for `${SECRET_DOMAIN}` point only to the `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}` IP address. This ensures that only DNS requests for `${SECRET_DOMAIN}` get routed to your [k8s_gateway](https://github.com/ori-edge/k8s_gateway) service, providing DNS resolution to your cluster applications/ingresses (a sketch of one way to configure this follows).
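As an illustration of the split-horizon idea above, here is a dnsmasq-style conditional forward (Pi-hole and AdGuard Home have equivalent settings); the domain and IP are the example values used elsewhere in this README and correspond to `${SECRET_DOMAIN}` and `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}`:
```text
# Forward only queries for your cluster domain to k8s_gateway
server=/onedr0p.com/192.168.1.220
```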
To access services from the outside world, port forward `80` and `443` in your router to the `${BOOTSTRAP_METALLB_INGRESS_ADDR}` IP. In a few moments, head over to your browser and you _should_ be able to access `https://echo-server.${BOOTSTRAP_CLOUDFLARE_DOMAIN}` from a device outside your LAN.
Now if nothing is working, that is expected. This is DNS after all!
### 🔐 SSL
By default in this template, Kubernetes ingresses are set to use the [Let's Encrypt Staging Environment](https://letsencrypt.org/docs/staging-environment/). This should reduce issues with ACME rate limits while you request certificates, until you are ready to use this in "Production".
Once you have confirmed there are no issues requesting your certificates, replace `letsencrypt-staging` with `letsencrypt-production` in the `cert-manager.io/cluster-issuer` annotation on your ingresses, as sketched below.
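A sketch of that change on an ingress (or on the ingress values of a `HelmRelease`):
```yaml
annotations:
  # staging issuer used while testing:
  # cert-manager.io/cluster-issuer: "letsencrypt-staging"
  # production issuer once everything works:
  cert-manager.io/cluster-issuer: "letsencrypt-production"
```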
### 🤖 Renovatebot
[Renovatebot](https://www.mend.io/free-developer-tools/renovate/) will scan your repository and offer PRs when it finds dependencies out of date. Common dependencies it will discover and update are Flux, Ansible Galaxy roles, Terraform providers, Kubernetes Helm charts, Kubernetes container images, pre-commit hooks, and more!
The base Renovate configuration provided in your repository can be viewed at [.github/renovate.json5](https://github.com/onedr0p/flux-cluster-template/blob/main/.github/renovate.json5). Note that it only runs on weekends; you can [change the schedule to anything you want](https://docs.renovatebot.com/presets-schedule/) or simply remove it (see the sketch below).
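For example, widening the schedule is a one-line change in `.github/renovate.json5` (the alternative value shown is just one of Renovate's supported schedule strings):
```json5
// current: only run on Sundays
"schedule": ["every sunday"],
// e.g. run on the whole weekend instead, or delete the key entirely to run anytime
// "schedule": ["every weekend"],
```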
To enable Renovate on your repository, click the 'Configure' button over at their [Github app page](https://github.com/apps/renovate) and choose your repository. Over time Renovate will create PRs for out-of-date dependencies it finds, and Flux will deploy any merged PRs that touch the `cluster` directory.
### 🪝 Github Webhook
Flux is pull-based by design, meaning it will periodically check your Git repository for changes. Using a webhook, you can have Flux update your cluster on `git push`. In order to configure GitHub to send `push` events from your repository to the Flux webhook receiver, you will need two things:
1. Webhook URL - Your webhook receiver will be deployed on `https://flux-receiver.${BOOTSTRAP_CLOUDFLARE_DOMAIN}/hook/:hookId`. In order to find out your hook ID you can run the following command:
```sh
kubectl -n flux-system get receiver/github-receiver --kubeconfig=./provision/kubeconfig
# NAME AGE READY STATUS
# github-receiver 6h8m True Receiver initialized with URL: /hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123
```
So if my domain was `onedr0p.com` the full url would look like this:
```text
https://flux-receiver.onedr0p.com/hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123
```
2. Webhook secret - Your webhook secret can be found by decrypting the `secret.sops.yaml` using the following command:
```sh
sops -d ./cluster/apps/flux-system/webhooks/github/secret.sops.yaml | yq .stringData.token
```
**Note:** Don't forget to update the `BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET` variable in your `.config.env` file so it matches the generated secret if applicable
Now that you have the webhook URL and secret, it's time to set everything up on the GitHub repository side. Navigate to the settings of your repository on GitHub and, under "Settings/Webhooks", press the "Add webhook" button. Fill in the webhook URL and your secret. For reference, a sketch of the Flux `Receiver` that exposes this endpoint follows.
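This is only a minimal sketch of what such a manifest looks like; the secret name here is an assumption, and the actual manifest in this template lives under `cluster/apps/flux-system/webhooks/github/`:
```yaml
apiVersion: notification.toolkit.fluxcd.io/v1beta1
kind: Receiver
metadata:
  name: github-receiver
  namespace: flux-system
spec:
  type: github
  events: ["ping", "push"]
  secretRef:
    name: github-webhook-token   # assumed name; must match the secret decrypted above
  resources:
    - kind: GitRepository
      name: flux-cluster
```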
### 💾 Storage
Rancher's `local-path-provisioner` is a great start for storage, but soon you might find you need more features, like replicated block storage or connecting to an NFS/SMB/iSCSI server. Check out the projects below to read up on some storage solutions that might work for you.
- [rook-ceph](https://github.com/rook/rook)
- [longhorn](https://github.com/longhorn/longhorn)
- [openebs](https://github.com/openebs/openebs)
- [nfs-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner)
- [democratic-csi](https://github.com/democratic-csi/democratic-csi)
- [csi-driver-nfs](https://github.com/kubernetes-csi/csi-driver-nfs)
- [synology-csi](https://github.com/SynologyOpenSource/synology-csi)
### 🔐 Authenticate Flux over SSH
Authenticating Flux to your Git repository has a couple of benefits, such as using a private Git repository and/or using the Flux [Image Automation Controllers](https://fluxcd.io/docs/components/image/).
By default this template only works with a public GitHub repository; it is advised to keep your repository public.
The benefits of a public repository include:
* When debugging or asking for help, you can provide a link to a resource you are having issues with.
* Adding the topic `k8s-at-home` to your repository gets it included in the [k8s-at-home-search](https://whazor.github.io/k8s-at-home-search/). This search helps people discover different configurations of Helm charts across other Flux-based repositories.
<details>
<summary>Expand to read guide on adding Flux SSH authentication</summary>
1. Generate new SSH key:
```sh
ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f ./cluster/github-deploy-key -q -P ""
```
2. Paste public key in the deploy keys section of your repository settings
3. Create sops secret in `cluster/flux/flux-system/github-deploy-key.sops.yaml` with the contents of:
```yaml
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: github-deploy-key
namespace: flux-system
stringData:
# 3a. Contents of github-deploy-key
identity: |
-----BEGIN OPENSSH PRIVATE KEY-----
...
-----END OPENSSH PRIVATE KEY-----
# 3b. Output of curl --silent https://api.github.com/meta | jq --raw-output '"github.com "+.ssh_keys[]'
known_hosts: |
github.com ssh-ed25519 ...
github.com ecdsa-sha2-nistp256 ...
github.com ssh-rsa ...
```
4. Encrypt secret:
```sh
sops --encrypt --in-place ./cluster/flux/flux-system/github-deploy-key.sops.yaml
```
5. Apply secret to cluster:
```sh
sops --decrypt cluster/flux/flux-system/github-deploy-key.sops.yaml | kubectl apply -f -
```
6. Update `cluster/flux/flux-system/flux-cluster.yaml`:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
name: flux-cluster
namespace: flux-system
spec:
interval: 10m
# 6a: Change this to your user and repo names
url: ssh://git@github.com/$user/$repo
ref:
branch: main
secretRef:
name: github-deploy-key
```
7. Commit and push changes
8. Force flux to reconcile your changes
```sh
task cluster:reconcile
```
9. Verify git repository is now using SSH:
```sh
task cluster:gitrepositories
```
10. Optionally set your repository to Private in your repository settings.
</details>
## 👉 Troubleshooting
Our [wiki](https://github.com/onedr0p/flux-cluster-template/wiki) (WIP, contributions welcome) is a good place to start troubleshooting issues. If that doesn't cover your issue, come join and say Hi in our community [Discord](https://discord.gg/k8s-at-home).
## ❔ What's next
The world is your cluster, have at it!
## 🤝 Thanks
Big shout out to all the authors and contributors to the projects that we are using in this repository.
Community member [@Whazor](https://github.com/whazor) created [this website](https://whazor.github.io/k8s-at-home-search/) as a creative way to search Helm Releases across GitHub. You may use it as a means to get ideas on how to configure an application's Helm values.

@ -0,0 +1,63 @@
---
version: "3"
vars:
PROJECT_DIR:
sh: "git rev-parse --show-toplevel"
CLUSTER_DIR: "{{.PROJECT_DIR}}/cluster"
ANSIBLE_DIR: "{{.PROJECT_DIR}}/provision/ansible"
TERRAFORM_DIR: "{{.PROJECT_DIR}}/provision/terraform"
dotenv: [".config.env"]
env:
KUBECONFIG: "{{.PROJECT_DIR}}/provision/kubeconfig"
includes:
ansible: .taskfiles/AnsibleTasks.yml
cluster: .taskfiles/ClusterTasks.yml
precommit: .taskfiles/PrecommitTasks.yml
terraform: .taskfiles/TerraformTasks.yml
sops: .taskfiles/SopsTasks.yml
tasks:
init:
desc: Initialize workstation dependencies with Brew
cmds:
- brew install {{.DEPS}} {{.CLI_ARGS}}
preconditions:
- sh: command -v brew
msg: |
Homebrew is not installed. Using MacOS, Linux or WSL?
Head over to https://brew.sh to get up and running.
vars:
DEPS: >-
age
ansible
direnv
fluxcd/tap/flux
go-task/tap/go-task
helm
ipcalc
jq
kubernetes-cli
kustomize
pre-commit
prettier
sops
stern
terraform
tflint
yamllint
yq
verify:
desc: Verify env settings
cmds:
- ./configure --verify
configure:
desc: Configure repository from env settings
cmds:
- ./configure

@ -0,0 +1,35 @@
[defaults]
# General settings
nocows = True
executable = /bin/bash
stdout_callback = yaml
force_valid_group_names = ignore
# File/Directory settings
log_path = ~/.ansible/ansible.log
inventory = ./provision/ansible/inventory
roles_path = ~/.ansible/roles:./provision/ansible/roles
collections_path = ~/.ansible/collections
remote_tmp = /tmp
local_tmp = ~/.ansible/tmp
# Fact Caching settings
fact_caching = jsonfile
fact_caching_connection = ~/.ansible/facts_cache
# SSH settings
remote_port = 22
timeout = 60
host_key_checking = False
# Plugin settings
vars_plugins_enabled = host_group_vars,community.sops.sops
[inventory]
unparsed_is_failed = true
[privilege_escalation]
become = True
[ssh_connection]
scp_if_ssh = smart
retries = 3
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s
pipelining = True
control_path = %(directory)s/%%h-%%r

@ -0,0 +1,75 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: echo-server
namespace: default
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 0.1.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
install:
createNamespace: true
remediation:
retries: 5
upgrade:
remediation:
retries: 5
values:
controller:
replicas: 3
strategy: RollingUpdate
image:
repository: docker.io/jmalloc/echo-server
tag: 0.3.3
service:
main:
ports:
http:
port: &port 8080
probes:
liveness: &probes
enabled: true
custom: true
spec:
httpGet:
path: /health
port: *port
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
readiness: *probes
startup:
enabled: false
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-staging"
external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}"
external-dns/is-public: "true"
hajimari.io/enable: "true"
hajimari.io/icon: "video-input-antenna"
hosts:
- host: &host "{{ .Release.Name }}.${SECRET_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
secretName: "echo-server-tls"
resources: