From 38aaadbfb17b9c8ca7b8786a6865212ac9b44ef7 Mon Sep 17 00:00:00 2001 From: Sergey Antropoff Date: Wed, 29 Apr 2026 23:21:04 +0300 Subject: [PATCH] docs: sync addon docs with explicit external/internal service modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Обновлена документация под новые аддоны (gitlab, redis, mongodb, kafka, kafka-ui, rabbitmq) и новую модель явного выбора зависимостей. Добавлены и унифицированы описания переключателей *_database_mode и *_redis_mode, обновлена таблица зависимостей аддонов, примеры конфигурации и список vault-секретов. --- .gitignore | 1 + Makefile | 58 ++- README.md | 18 +- addons/argocd/README.md | 13 + addons/argocd/role/defaults/main.yml | 12 + addons/argocd/role/tasks/main.yml | 15 + addons/authelia/README.md | 483 ++---------------- .../role/chart/files/configuration.yml.tpl | 4 +- .../authelia/role/chart/templates/redis.yaml | 2 +- addons/authelia/role/chart/values.yaml | 4 + addons/authelia/role/defaults/main.yml | 12 +- .../role/molecule/default/converge.yml | 4 +- addons/authelia/role/tasks/main.yml | 2 +- addons/authelia/role/templates/values.yaml.j2 | 18 +- addons/ceph-rock/README.md | 109 ++++ addons/ceph-rock/playbook.yml | 7 + addons/ceph-rock/role/defaults/main.yml | 66 +++ .../role/molecule/default/converge.yml | 30 ++ .../role/molecule/default/molecule.yml | 28 + .../role/molecule/default/verify.yml | 50 ++ addons/ceph-rock/role/tasks/main.yml | 98 ++++ .../role/templates/ceph-cluster.yaml.j2 | 127 +++++ .../templates/ceph-dashboard-ingress.yaml.j2 | 29 ++ addons/cert-manager/README.md | 5 + addons/crowdsec/README.md | 5 + addons/csi-ceph/README.md | 187 ++++--- addons/csi-ceph/playbook.yml | 2 +- addons/csi-ceph/role/defaults/main.yml | 90 ++-- .../role/molecule/default/converge.yml | 50 +- .../csi-ceph/role/molecule/default/verify.yml | 61 +-- addons/csi-ceph/role/tasks/main.yml | 136 ++--- .../templates/csi-ceph-secrets-and-sc.yaml.j2 | 68 +++ 
.../role/templates/csi-config-map.yaml.j2 | 22 + addons/csi-glusterfs/README.md | 5 + addons/csi-nfs/README.md | 5 + addons/csi-s3/README.md | 5 + addons/databasus/README.md | 5 + addons/external-secrets/README.md | 5 + addons/gitea/README.md | 10 +- addons/gitea/role/defaults/main.yml | 7 +- addons/gitea/role/tasks/main.yml | 15 +- .../gitea/role/templates/gitea-values.yaml.j2 | 6 +- addons/gitlab/README.md | 47 ++ addons/gitlab/playbook.yml | 7 + addons/gitlab/role/defaults/main.yml | 47 ++ .../gitlab/role/molecule/default/converge.yml | 20 + .../gitlab/role/molecule/default/molecule.yml | 27 + .../gitlab/role/molecule/default/verify.yml | 26 + addons/gitlab/role/tasks/main.yml | 143 ++++++ .../role/templates/gitlab-values.yaml.j2 | 55 ++ addons/harbor/README.md | 12 +- addons/harbor/role/defaults/main.yml | 16 +- addons/hysteria2-server/README.md | 5 + addons/ingress-add-domains/README.md | 5 + addons/ingress-nginx/README.md | 5 + addons/ingress-proxypass/README.md | 5 + addons/istio/README.md | 5 + addons/jenkins/README.md | 5 + addons/kafka-ui/README.md | 37 ++ addons/kafka-ui/playbook.yml | 7 + addons/kafka-ui/role/defaults/main.yml | 26 + .../role/molecule/default/converge.yml | 11 + .../role/molecule/default/molecule.yml | 14 + .../kafka-ui/role/molecule/default/verify.yml | 14 + addons/kafka-ui/role/tasks/main.yml | 27 + .../role/templates/kafka-ui-values.yaml.j2 | 25 + addons/kafka/README.md | 30 ++ addons/kafka/playbook.yml | 7 + addons/kafka/role/defaults/main.yml | 29 ++ .../kafka/role/molecule/default/converge.yml | 10 + .../kafka/role/molecule/default/molecule.yml | 14 + addons/kafka/role/molecule/default/verify.yml | 13 + addons/kafka/role/tasks/main.yml | 52 ++ addons/kubernetes-dashboard/README.md | 5 + addons/loki/README.md | 5 + addons/longhorn/README.md | 5 + addons/mediaserver/README.md | 5 + addons/metrics-server/README.md | 5 + addons/minio/README.md | 5 + addons/mongodb/README.md | 28 + addons/mongodb/playbook.yml | 7 + 
addons/mongodb/role/defaults/main.yml | 33 ++ .../role/molecule/default/converge.yml | 10 + .../role/molecule/default/molecule.yml | 14 + .../mongodb/role/molecule/default/verify.yml | 13 + addons/mongodb/role/tasks/main.yml | 40 ++ addons/mysql/README.md | 5 + addons/netbird/README.md | 5 + addons/nextcloud/README.md | 12 +- addons/nextcloud/role/defaults/main.yml | 7 +- addons/nextcloud/role/tasks/main.yml | 15 +- .../role/templates/nextcloud-values.yaml.j2 | 2 +- addons/nfs-server/README.md | 5 + addons/owncloud/README.md | 5 + addons/postgresql/README.md | 5 + addons/prometheus-stack/README.md | 5 + addons/promtail/README.md | 5 + addons/pushgateway/README.md | 5 + addons/rabbitmq/README.md | 28 + addons/rabbitmq/playbook.yml | 7 + addons/rabbitmq/role/defaults/main.yml | 26 + .../role/molecule/default/converge.yml | 10 + .../role/molecule/default/molecule.yml | 14 + .../rabbitmq/role/molecule/default/verify.yml | 13 + addons/rabbitmq/role/tasks/main.yml | 33 ++ addons/redis/README.md | 28 + addons/redis/playbook.yml | 7 + addons/redis/role/defaults/main.yml | 26 + .../redis/role/molecule/default/converge.yml | 11 + .../redis/role/molecule/default/molecule.yml | 14 + addons/redis/role/molecule/default/verify.yml | 13 + addons/redis/role/tasks/main.yml | 39 ++ addons/smtp-relay/README.md | 5 + addons/splitgw/README.md | 5 + addons/technitium-dns/README.md | 174 ++----- addons/tempo/README.md | 5 + addons/vault/README.md | 5 + addons/vaultwarden/README.md | 5 + addons/velero/README.md | 5 + addons/yandex-dns-controller/README.md | 5 + docs/addons.md | 52 +- docs/cicd.md | 28 +- docs/configuration.md | 14 + docs/security.md | 98 ++++ docs/storage.md | 4 +- group_vars/all/addons.yml | 149 +++++- group_vars/all/vault.yml.example | 32 ++ playbooks/addons.yml | 52 +- 128 files changed, 2881 insertions(+), 902 deletions(-) create mode 100644 addons/ceph-rock/README.md create mode 100644 addons/ceph-rock/playbook.yml create mode 100644 
addons/ceph-rock/role/defaults/main.yml create mode 100644 addons/ceph-rock/role/molecule/default/converge.yml create mode 100644 addons/ceph-rock/role/molecule/default/molecule.yml create mode 100644 addons/ceph-rock/role/molecule/default/verify.yml create mode 100644 addons/ceph-rock/role/tasks/main.yml create mode 100644 addons/ceph-rock/role/templates/ceph-cluster.yaml.j2 create mode 100644 addons/ceph-rock/role/templates/ceph-dashboard-ingress.yaml.j2 create mode 100644 addons/csi-ceph/role/templates/csi-ceph-secrets-and-sc.yaml.j2 create mode 100644 addons/csi-ceph/role/templates/csi-config-map.yaml.j2 create mode 100644 addons/gitlab/README.md create mode 100644 addons/gitlab/playbook.yml create mode 100644 addons/gitlab/role/defaults/main.yml create mode 100644 addons/gitlab/role/molecule/default/converge.yml create mode 100644 addons/gitlab/role/molecule/default/molecule.yml create mode 100644 addons/gitlab/role/molecule/default/verify.yml create mode 100644 addons/gitlab/role/tasks/main.yml create mode 100644 addons/gitlab/role/templates/gitlab-values.yaml.j2 create mode 100644 addons/kafka-ui/README.md create mode 100644 addons/kafka-ui/playbook.yml create mode 100644 addons/kafka-ui/role/defaults/main.yml create mode 100644 addons/kafka-ui/role/molecule/default/converge.yml create mode 100644 addons/kafka-ui/role/molecule/default/molecule.yml create mode 100644 addons/kafka-ui/role/molecule/default/verify.yml create mode 100644 addons/kafka-ui/role/tasks/main.yml create mode 100644 addons/kafka-ui/role/templates/kafka-ui-values.yaml.j2 create mode 100644 addons/kafka/README.md create mode 100644 addons/kafka/playbook.yml create mode 100644 addons/kafka/role/defaults/main.yml create mode 100644 addons/kafka/role/molecule/default/converge.yml create mode 100644 addons/kafka/role/molecule/default/molecule.yml create mode 100644 addons/kafka/role/molecule/default/verify.yml create mode 100644 addons/kafka/role/tasks/main.yml create mode 100644 
addons/mongodb/README.md create mode 100644 addons/mongodb/playbook.yml create mode 100644 addons/mongodb/role/defaults/main.yml create mode 100644 addons/mongodb/role/molecule/default/converge.yml create mode 100644 addons/mongodb/role/molecule/default/molecule.yml create mode 100644 addons/mongodb/role/molecule/default/verify.yml create mode 100644 addons/mongodb/role/tasks/main.yml create mode 100644 addons/rabbitmq/README.md create mode 100644 addons/rabbitmq/playbook.yml create mode 100644 addons/rabbitmq/role/defaults/main.yml create mode 100644 addons/rabbitmq/role/molecule/default/converge.yml create mode 100644 addons/rabbitmq/role/molecule/default/molecule.yml create mode 100644 addons/rabbitmq/role/molecule/default/verify.yml create mode 100644 addons/rabbitmq/role/tasks/main.yml create mode 100644 addons/redis/README.md create mode 100644 addons/redis/playbook.yml create mode 100644 addons/redis/role/defaults/main.yml create mode 100644 addons/redis/role/molecule/default/converge.yml create mode 100644 addons/redis/role/molecule/default/molecule.yml create mode 100644 addons/redis/role/molecule/default/verify.yml create mode 100644 addons/redis/role/tasks/main.yml diff --git a/.gitignore b/.gitignore index b2fc734..b5de203 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ host_vars/*/vault.yml *.retry dashboard/ +*.db # Kubeconfig — содержит токены доступа к кластеру kubeconfig diff --git a/Makefile b/Makefile index dab335e..1b3c26c 100644 --- a/Makefile +++ b/Makefile @@ -58,11 +58,11 @@ DOCKER_RUN := docker run --rm $(DOCKER_TTY) \ addon-ingress-nginx addon-cert-manager addon-nfs-server addon-csi-nfs addon-nfs \ addon-istio addon-prometheus-stack addon-metrics-server \ addon-argocd addon-longhorn addon-kubernetes-dashboard \ - addon-postgresql addon-mysql addon-databasus \ + addon-postgresql addon-mysql addon-redis addon-mongodb addon-kafka addon-kafka-ui addon-rabbitmq addon-gitlab addon-databasus \ addon-minio addon-velero addon-crowdsec \ 
addon-loki addon-promtail addon-tempo addon-pushgateway \ addon-harbor addon-gitea addon-owncloud addon-nextcloud \ - addon-csi-s3 addon-csi-ceph addon-csi-glusterfs addon-vaultwarden \ + addon-csi-s3 addon-csi-ceph addon-ceph-rock addon-csi-glusterfs addon-vaultwarden \ addon-smtp-relay addon-vault addon-external-secrets \ addon-jenkins addon-netbird addon-mediaserver addon-hysteria2-server addon-splitgw addon-ingress-proxypass addon-ingress-add-domains addon-yandex-dns-controller addon-technitium-dns addon-authelia \ add-node remove-node \ @@ -75,6 +75,7 @@ DOCKER_RUN := docker run --rm $(DOCKER_TTY) \ molecule-addon-technitium-dns molecule-addon-authelia \ molecule-addon-ingress-proxypass molecule-addon-ingress-add-domains \ molecule-addon-yandex-dns-controller \ + molecule-addon-gitlab molecule-addon-redis molecule-addon-mongodb molecule-addon-kafka molecule-addon-kafka-ui molecule-addon-rabbitmq \ molecule-addon-all molecule-all molecule-lint molecule-report \ vault-create vault-edit vault-view vault-encrypt-string \ vault-bootstrap-create vault-bootstrap-edit \ @@ -331,6 +332,30 @@ addon-mysql: _check_env _check_image ## Установить MySQL (Bitnami; ARG @printf "$(CYAN)Устанавливаю MySQL...$(NC)\n" $(DOCKER_RUN) addon mysql $(ARGS) +addon-redis: _check_env _check_image ## Установить Redis (Bitnami; ARGS="-e redis_architecture=replication") + @printf "$(CYAN)Устанавливаю Redis...$(NC)\n" + $(DOCKER_RUN) addon redis $(ARGS) + +addon-mongodb: _check_env _check_image ## Установить MongoDB (Bitnami; ARGS="-e mongodb_architecture=replicaset") + @printf "$(CYAN)Устанавливаю MongoDB...$(NC)\n" + $(DOCKER_RUN) addon mongodb $(ARGS) + +addon-kafka: _check_env _check_image ## Установить Kafka (Bitnami KRaft; ARGS="-e kafka_mode=cluster") + @printf "$(CYAN)Устанавливаю Kafka...$(NC)\n" + $(DOCKER_RUN) addon kafka $(ARGS) + +addon-kafka-ui: _check_env _check_image ## Установить Kafka UI с логином/паролем + @printf "$(CYAN)Устанавливаю Kafka UI...$(NC)\n" + $(DOCKER_RUN) 
addon kafka-ui $(ARGS) + +addon-rabbitmq: _check_env _check_image ## Установить RabbitMQ (Bitnami; ARGS="-e rabbitmq_mode=cluster") + @printf "$(CYAN)Устанавливаю RabbitMQ...$(NC)\n" + $(DOCKER_RUN) addon rabbitmq $(ARGS) + +addon-gitlab: _check_env _check_image ## Установить GitLab + Runner (ARGS="-e gitlab_ingress_host=gitlab.example.com") + @printf "$(CYAN)Устанавливаю GitLab + Runner...$(NC)\n" + $(DOCKER_RUN) addon gitlab $(ARGS) + addon-databasus: _check_env _check_image ## Установить Databasus — управление резервными копиями БД (ARGS="-e databasus_ingress_host=backup.example.com") @printf "$(CYAN)Устанавливаю Databasus...$(NC)\n" $(DOCKER_RUN) addon databasus $(ARGS) @@ -387,10 +412,14 @@ addon-csi-s3: _check_env _check_image ## Установить CSI S3 Driver — @printf "$(CYAN)Устанавливаю CSI S3 Driver...$(NC)\n" $(DOCKER_RUN) addon csi-s3 $(ARGS) -addon-csi-ceph: _check_env _check_image ## Установить Rook-Ceph — distributed block (RWO) + filesystem (RWX) storage - @printf "$(CYAN)Устанавливаю Rook-Ceph...$(NC)\n" +addon-csi-ceph: _check_env _check_image ## Установить Kubernetes CSI Ceph (Rook-Ceph, PVC на Ceph) + @printf "$(CYAN)Устанавливаю CSI Ceph...$(NC)\n" $(DOCKER_RUN) addon csi-ceph $(ARGS) +addon-ceph-rock: _check_env _check_image ## Установить Ceph-Rook — distributed block (RWO) + filesystem (RWX) storage + @printf "$(CYAN)Устанавливаю Rook-Ceph...$(NC)\n" + $(DOCKER_RUN) addon ceph-rock $(ARGS) + addon-csi-glusterfs: _check_env _check_image ## Установить CSI GlusterFS Driver (требует внешний GlusterFS + Heketi, ARGS="-e csi_glusterfs_heketi_url=...") @printf "$(CYAN)Устанавливаю CSI GlusterFS Driver...$(NC)\n" $(DOCKER_RUN) addon csi-glusterfs $(ARGS) @@ -653,6 +682,8 @@ molecule-addon-crowdsec: _check_image ## Тест аддона crowdsec (default $(DOCKER_RUN_MOLECULE) molecule-addon crowdsec molecule-addon-csi-ceph: _check_image ## Тест аддона csi-ceph (CephCluster шаблон), ~1 мин $(DOCKER_RUN_MOLECULE) molecule-addon csi-ceph +molecule-addon-ceph-rock: 
_check_image ## Тест аддона ceph-rock (CephCluster шаблон), ~1 мин + $(DOCKER_RUN_MOLECULE) molecule-addon ceph-rock molecule-addon-csi-glusterfs: _check_image ## Тест аддона csi-glusterfs (StorageClass шаблон), ~1 мин $(DOCKER_RUN_MOLECULE) molecule-addon csi-glusterfs molecule-addon-csi-nfs: _check_image ## Тест аддона csi-nfs (StorageClass шаблон), ~1 мин @@ -661,6 +692,18 @@ molecule-addon-csi-s3: _check_image ## Тест аддона csi-s3 (defaults va $(DOCKER_RUN_MOLECULE) molecule-addon csi-s3 molecule-addon-databasus: _check_image ## Тест аддона databasus (defaults validation), ~1 мин $(DOCKER_RUN_MOLECULE) molecule-addon databasus +molecule-addon-gitlab: _check_image ## Тест аддона gitlab (values шаблон), ~2 мин + $(DOCKER_RUN_MOLECULE) molecule-addon gitlab +molecule-addon-redis: _check_image ## Тест аддона redis (defaults validation), ~1 мин + $(DOCKER_RUN_MOLECULE) molecule-addon redis +molecule-addon-mongodb: _check_image ## Тест аддона mongodb (defaults validation), ~1 мин + $(DOCKER_RUN_MOLECULE) molecule-addon mongodb +molecule-addon-kafka: _check_image ## Тест аддона kafka (defaults validation), ~1 мин + $(DOCKER_RUN_MOLECULE) molecule-addon kafka +molecule-addon-kafka-ui: _check_image ## Тест аддона kafka-ui (defaults validation), ~1 мин + $(DOCKER_RUN_MOLECULE) molecule-addon kafka-ui +molecule-addon-rabbitmq: _check_image ## Тест аддона rabbitmq (defaults validation), ~1 мин + $(DOCKER_RUN_MOLECULE) molecule-addon rabbitmq molecule-addon-external-secrets: _check_image ## Тест аддона external-secrets (ClusterSecretStore шаблон), ~1 мин $(DOCKER_RUN_MOLECULE) molecule-addon external-secrets molecule-addon-gitea: _check_image ## Тест аддона gitea (Helm values шаблон), ~2 мин @@ -725,10 +768,17 @@ molecule-addon-all: _check_image ## Тест всех аддонов с Molecule $(MAKE) molecule-addon-cert-manager $(MAKE) molecule-addon-crowdsec $(MAKE) molecule-addon-csi-ceph + $(MAKE) molecule-addon-ceph-rock $(MAKE) molecule-addon-csi-glusterfs $(MAKE) 
molecule-addon-csi-nfs $(MAKE) molecule-addon-csi-s3 $(MAKE) molecule-addon-databasus + $(MAKE) molecule-addon-gitlab + $(MAKE) molecule-addon-redis + $(MAKE) molecule-addon-mongodb + $(MAKE) molecule-addon-kafka + $(MAKE) molecule-addon-kafka-ui + $(MAKE) molecule-addon-rabbitmq $(MAKE) molecule-addon-external-secrets $(MAKE) molecule-addon-gitea $(MAKE) molecule-addon-harbor diff --git a/README.md b/README.md index 673c5ed..09a3dcd 100644 --- a/README.md +++ b/README.md @@ -38,24 +38,28 @@ HA-режим (embedded etcd): при отказе **любой одной** н **CNI:** `flannel` (встроен) | `calico` (Network Policy, BGP) | `cilium` (eBPF, Hubble) -## Аддоны (37) +## Аддоны (48) | Категория | Аддоны | |---|---| -| **Сеть** | ingress-nginx, cert-manager, istio + kiali, crowdsec, netbird VPN | -| **Хранилище** | nfs-server, csi-nfs, longhorn, minio, csi-s3, csi-ceph, csi-glusterfs | -| **Базы данных** | postgresql, mysql, databasus | +| **Сеть** | ingress-nginx, cert-manager, istio + kiali, crowdsec, netbird VPN, technitium-dns, yandex-dns-controller | +| **Хранилище** | nfs-server, csi-nfs, longhorn, minio, csi-s3, csi-ceph (Ceph CSI), ceph-rock, csi-glusterfs | +| **Базы данных** | postgresql, mysql, redis, mongodb, kafka, kafka-ui, rabbitmq, databasus | | **Observability** | metrics-server, prometheus+grafana+alertmanager, loki, promtail, tempo, pushgateway | -| **CI/CD** | jenkins, gitea + actions, argocd | -| **Безопасность** | vault, external-secrets, vaultwarden | +| **CI/CD** | jenkins, gitea + actions, gitlab + runners, argocd | +| **Безопасность** | vault, external-secrets, vaultwarden, authelia | | **Инфраструктура** | harbor, kubernetes-dashboard, velero, smtp-relay | | **Файловые хранилища** | nextcloud, owncloud | | **Медиасервер** | mediaserver — Plex, Sonarr, Radarr, Lidarr, Bazarr, Prowlarr + Hysteria2, Overseerr, Transmission, Samba | -| **VPN / Прокси** | splitgw — прозрачный split-tunnel gateway (sing-box + Hysteria2 TPROXY, YouTube → прокси) | +| **VPN / 
Прокси** | hysteria2-server, splitgw — прозрачный split-tunnel gateway (sing-box + Hysteria2 TPROXY, YouTube → прокси) | | **Ingress Proxy** | ingress-proxypass — проксировать внешние сервисы (IP:PORT) через ingress-nginx по домену | Все аддоны включаются флагами в `group_vars/all/addons.yml`. Установка: `make addon-`. +Для сервисов с БД/кэшем добавлены явные режимы подключения: +- `*_database_mode`: `auto` / `internal` / `external_postgresql` +- `*_redis_mode`: `auto` / `internal` / `external_redis` (и `disabled` для Authelia) + ## Документация | Раздел | Описание | diff --git a/addons/argocd/README.md b/addons/argocd/README.md index f3027b8..2bebb02 100644 --- a/addons/argocd/README.md +++ b/addons/argocd/README.md @@ -21,6 +21,7 @@ make addon-argocd | `argocd_ingress_host` | `argocd.example.com` | Hostname | | `argocd_ingress_tls` | `false` | TLS через cert-manager | | `argocd_metrics_enabled` | `true` | Prometheus метрики | +| `argocd_redis_mode` | `auto` | `auto` \| `internal` \| `external_redis` | ```yaml # group_vars/all/addons.yml @@ -29,6 +30,13 @@ argocd_ingress_host: "argocd.example.com" argocd_ingress_tls: true ``` +## Режим Redis (встроенный или внешний) + +Выбор задаётся переменной `argocd_redis_mode`: +- `auto` — внешний Redis при `addon_redis: true`, иначе встроенный Redis чарта; +- `internal` — всегда встроенный Redis; +- `external_redis` — всегда внешний Redis (например из addon `redis`). + ## Первый вход После установки Ansible выводит начальный пароль. 
Сменить: @@ -154,3 +162,8 @@ stringData: username: gitea password: "token-or-password" ``` +## Официальные ресурсы + +- Официальный сайт: [https://argo-cd.readthedocs.io/](https://argo-cd.readthedocs.io/) +- Официальная документация: [https://argo-cd.readthedocs.io/](https://argo-cd.readthedocs.io/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/argo/argo-cd](https://artifacthub.io/packages/helm/argo/argo-cd) diff --git a/addons/argocd/role/defaults/main.yml b/addons/argocd/role/defaults/main.yml index 67c364c..156d319 100644 --- a/addons/argocd/role/defaults/main.yml +++ b/addons/argocd/role/defaults/main.yml @@ -24,6 +24,18 @@ argocd_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('lets argocd_metrics_enabled: true # ServiceMonitor создаётся только при addon_prometheus_stack: true +# Режим Redis для ArgoCD: +# auto — внешний Redis при addon_redis=true, иначе встроенный Redis чарта +# internal — всегда встроенный Redis чарта +# external_redis — всегда внешний Redis +argocd_redis_mode: "auto" # auto | internal | external_redis +# Хост внешнего Redis +argocd_redis_host: "redis-master.redis.svc.cluster.local" +# Порт внешнего Redis +argocd_redis_port: 6379 +# Пароль внешнего Redis (обычно из addon redis) +argocd_redis_password: "{{ vault_redis_password | default('') }}" + # Ресурсы подов ArgoCD (requests/limits) argocd_resources: requests: diff --git a/addons/argocd/role/tasks/main.yml b/addons/argocd/role/tasks/main.yml index b0bb67f..08616ff 100644 --- a/addons/argocd/role/tasks/main.yml +++ b/addons/argocd/role/tasks/main.yml @@ -6,6 +6,15 @@ environment: KUBECONFIG: "{{ k3s_kubeconfig_path }}" +- name: Resolve ArgoCD Redis mode + ansible.builtin.set_fact: + _argocd_use_external_redis: >- + {{ + (argocd_redis_mode == 'external_redis') + or + (argocd_redis_mode == 'auto' and (addon_redis | default(false) | bool)) + }} + - name: Install ArgoCD via Helm kubernetes.core.helm: name: argocd @@ -50,6 +59,7 @@ serviceMonitor: enabled: 
"{{ argocd_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" redis: + enabled: "{{ (not _argocd_use_external_redis) | bool }}" resources: requests: cpu: 25m @@ -57,6 +67,10 @@ limits: cpu: 200m memory: 128Mi + externalRedis: + host: "{{ argocd_redis_host if _argocd_use_external_redis else '' }}" + port: "{{ argocd_redis_port if _argocd_use_external_redis else 6379 }}" + password: "{{ argocd_redis_password if _argocd_use_external_redis else '' }}" environment: KUBECONFIG: "{{ k3s_kubeconfig_path }}" @@ -94,6 +108,7 @@ Логин: admin Пароль: {{ argocd_password_b64.stdout | b64decode if argocd_password_b64.rc == 0 else '(не найден — смени через argocd CLI)' }} + Redis: {{ 'внешний (' + argocd_redis_host + ')' if _argocd_use_external_redis else 'встроенный' }} Смени пароль после первого входа! После смены удали секрет: kubectl -n {{ argocd_namespace }} delete secret argocd-initial-admin-secret diff --git a/addons/authelia/README.md b/addons/authelia/README.md index b8dc098..0738db5 100644 --- a/addons/authelia/README.md +++ b/addons/authelia/README.md @@ -1,494 +1,85 @@ # authelia -Self-hosted authentication system providing **forward-auth** for ingress-nginx and an **OIDC provider** for Gitea, Grafana, ArgoCD, MinIO, Vault, and Nextcloud. +Authelia — централизованный сервис аутентификации для Kubernetes: `forward-auth` для `ingress-nginx` и OIDC-провайдер для сервисов (Gitea, Grafana, ArgoCD, Vault, MinIO и др.). -## Architecture +## Что делает аддон -``` -User → ingress-nginx → (auth-url) → Authelia :9091 → allowed/denied - ↓ - Authelia portal - auth.home.local - ↓ - users_database.yml - (argon2id passwords) +- разворачивает Authelia в отдельном namespace; +- создаёт ingress для портала входа (`authelia_host`); +- генерирует/использует конфигурацию доступа по доменам; +- включает OIDC-клиенты для выбранных сервисов; +- хранит чувствительные данные в `group_vars/all/vault.yml`. 
-OIDC flow: -Service → Authelia /api/oidc/authorization → login → token → Service -``` +## Быстрый старт -``` -Traffic routes: - auth.home.local → authelia ClusterIP :9091 (Ingress, NO forward-auth) - sonarr.home.local → sonarr service (Ingress + forward-auth annotations) - gitea.home.local → gitea service (Ingress, no forward-auth — OIDC handles it) - -Kubernetes objects: - Deployment: authelia - Service: authelia (ClusterIP :9091) - Secrets: authelia-secrets (jwt, session, storage_encryption, oidc keys) - authelia-config (configuration.yml — contains OIDC client secrets) - authelia-users (users_database.yml) - PVC: authelia-data (SQLite db + notification.txt) - Ingress: authelia (auth.home.local) - ConfigMap: authelia-forward-auth (copy-paste annotation reference) - Deployment: authelia-redis (optional, redis.enabled=true) -``` - ---- - -## 1. Installation - -### Step 1 — Generate secrets - -Run these commands and save the output: -```bash -# Core secrets -openssl rand -base64 64 # → authelia_jwt_secret -openssl rand -base64 64 # → authelia_session_secret -openssl rand -base64 32 # → authelia_storage_encryption_key -openssl rand -base64 48 # → authelia_oidc_hmac_secret - -# OIDC client secrets (one per enabled client) -openssl rand -hex 32 # → authelia_oidc_secret_gitea -openssl rand -hex 32 # → authelia_oidc_secret_grafana -``` - -> The **OIDC RSA private key** is auto-generated during deploy. Leave `authelia_oidc_private_key: ""` in vault. After first deploy, retrieve and save it (see note at end of deploy output). - -### Step 2 — Generate admin password hash - -```bash -docker run --rm authelia/authelia:latest authelia hash-password 'your-password' -# Output: $argon2id$v=19$m=65536,t=3,p=4$... -``` - -Copy the full `$argon2id$...` string. - -### Step 3 — Edit vault.yml +1. 
Включите аддон в `group_vars/all/addons.yml`: ```yaml -# group_vars/all/vault.yml (ansible-vault encrypted) -authelia_jwt_secret: "" -authelia_session_secret: "" -authelia_storage_encryption_key: "" -authelia_oidc_hmac_secret: "" -authelia_oidc_private_key: "" # auto-generated on first deploy - -authelia_oidc_secret_gitea: "" -authelia_oidc_secret_grafana: "" - -authelia_user_admin_password_hash: "$argon2id$v=19$m=65536,t=3,p=4$..." -``` - -### Step 4 — Configure addons.yml - -```yaml -# group_vars/all/addons.yml addon_authelia: true - -authelia_host: "auth.home.local" +authelia_host: "auth.home.local" authelia_domain: "home.local" - -# OIDC clients to enable -authelia_oidc_gitea_enabled: true -authelia_oidc_grafana_enabled: true - -# Domains to protect -authelia_protected_domains: - - sonarr.home.local - - radarr.home.local - - lidarr.home.local - - prowlarr.home.local - - pgadmin.home.local - -# Domains requiring admin group -authelia_admin_domains: - - argocd.home.local - - vault.home.local - -# Public bypass (no auth) -authelia_bypass_domains: - - plex.home.local ``` -### Step 5 — Deploy +2. Добавьте секреты в `group_vars/all/vault.yml`: + +```yaml +authelia_jwt_secret: "" +authelia_session_secret: "" +authelia_storage_encryption_key: "" +authelia_oidc_hmac_secret: "" +authelia_user_admin_password_hash: "$argon2id$..." +``` + +3. Установите аддон: ```bash make addon-authelia ``` -### Step 6 — Add DNS record - -Add `auth.home.local` pointing to the kube-vip/ingress-nginx IP in Technitium DNS (or your DNS server). - ---- - -## 2. Managing users - -### Add a new user - -1. Generate password hash: - ```bash - docker run --rm authelia/authelia:latest authelia hash-password 'newpassword' - ``` - -2. Add to `group_vars/all/addons.yml`: - ```yaml - authelia_users: - admin: - displayname: "Administrator" - email: "admin@home.local" - groups: [admins, users] - alice: - displayname: "Alice" - email: "alice@home.local" - groups: [users] - ``` - -3. 
Add to `vault.yml`: - ```yaml - authelia_user_alice_password_hash: "$argon2id$..." - ``` - -4. Redeploy: `make addon-authelia` - -### Groups - -- `admins` — access to `authelia_admin_domains` (ArgoCD, Vault, Harbor, Dashboard) -- `users` — access to `authelia_protected_domains` (Sonarr, Radarr, etc.) - -OIDC claims include the `groups` scope, so Grafana/Gitea can use group-based role mapping. - ---- - -## 3. Protect a new service with forward-auth - -### Step 1 — Add annotations to the service's Ingress +## Пример защиты ingress через forward-auth ```yaml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: myservice - namespace: myservice annotations: kubernetes.io/ingress.class: nginx - # ── Authelia forward-auth ────────────────────────────────────────────── nginx.ingress.kubernetes.io/auth-url: "http://authelia.authelia.svc.cluster.local:9091/api/authz/forward-auth" nginx.ingress.kubernetes.io/auth-signin: "http://auth.home.local/?rd=$scheme://$host$escaped_request_uri" - nginx.ingress.kubernetes.io/auth-response-headers: "Remote-User,Remote-Name,Remote-Groups,Remote-Email" - nginx.ingress.kubernetes.io/auth-snippet: | - proxy_set_header X-Forwarded-Method $request_method; -spec: - rules: - - host: myservice.home.local - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: myservice - port: - number: 8080 ``` -> Get the exact URLs for your deployment: -> ```bash -> kubectl get cm authelia-forward-auth -n authelia -o jsonpath='{.data.annotations\.yaml}' -> ``` - -### Step 2 — Add to access control - -Add `myservice.home.local` to the appropriate list in `addons.yml`: -```yaml -authelia_protected_domains: - - myservice.home.local - # ... existing entries -``` - -Then redeploy: `make addon-authelia` - ---- - -## 4. 
OIDC client configuration per service - -### Gitea - -In Gitea Admin → Site Administration → Authentication Sources → Add OAuth2: - -| Field | Value | -|-------|-------| -| Name | `Authelia` | -| OAuth2 Provider | `OpenID Connect` | -| Client ID | `gitea` | -| Client Secret | value of `authelia_oidc_secret_gitea` in vault | -| OpenID Connect Auto Discovery URL | `http://auth.home.local/.well-known/openid-configuration` | -| Scopes | `openid profile email groups` | - -Restart Gitea. Users can now click "Sign in with Authelia" on the login page. - -Optional — auto-create users and map admin group: -```ini -# app.ini -[oauth2] -USERNAME = preferred_username -UPDATE_AVATAR = true - -[openid] -ENABLE_OPENID_SIGNIN = true -``` - ---- - -### Grafana - -Add to Grafana's `grafana.ini` (or Helm values): +## Пример OIDC для Grafana ```ini [auth.generic_oauth] enabled = true name = Authelia client_id = grafana -client_secret = +client_secret = <секрет из vault> scopes = openid profile email groups auth_url = http://auth.home.local/api/oidc/authorization token_url = http://auth.home.local/api/oidc/token api_url = http://auth.home.local/api/oidc/userinfo - -# Role mapping via Authelia groups -role_attribute_path = contains(groups[*], 'admins') && 'GrafanaAdmin' || 'Viewer' -allow_sign_up = true ``` -Or in Helm values for the prometheus-stack addon: -```yaml -# group_vars/all/addons.yml -prometheus_grafana_oauth_enabled: true -prometheus_grafana_oauth_client_id: "grafana" -prometheus_grafana_oauth_client_secret: "..." 
# from vault -prometheus_grafana_oauth_auth_url: "http://auth.home.local/api/oidc/authorization" -prometheus_grafana_oauth_token_url: "http://auth.home.local/api/oidc/token" -prometheus_grafana_oauth_api_url: "http://auth.home.local/api/oidc/userinfo" -``` - ---- - -### ArgoCD - -Enable ArgoCD OIDC in `argocd-cm` ConfigMap: -```yaml -# values override for argocd Helm chart -server: - config: - oidc.config: | - name: Authelia - issuer: http://auth.home.local - clientID: argocd - clientSecret: $oidc.authelia.clientSecret - requestedScopes: - - openid - - profile - - email - - groups - requestedIDTokenClaims: - groups: - essential: true -``` - -Map Authelia groups to ArgoCD RBAC: -```yaml -# argocd-rbac-cm -policy.csv: | - g, admins, role:admin -policy.default: role:readonly -``` - -Store the secret in a K8s Secret `argocd-secret`: -```yaml -data: - oidc.authelia.clientSecret: -``` - ---- - -### MinIO - -In MinIO Console → Identity → OpenID: - -| Field | Value | -|-------|-------| -| Config URL | `http://auth.home.local/.well-known/openid-configuration` | -| Client ID | `minio` | -| Client Secret | value of `authelia_oidc_secret_minio` | -| Claim Name | `groups` | -| Scopes | `openid,profile,email` | - -Map Authelia group `admins` to MinIO policy `consoleAdmin`. - ---- - -### Vault - -Configure Vault OIDC auth method: -```bash -vault auth enable oidc - -vault write auth/oidc/config \ - oidc_discovery_url="http://auth.home.local" \ - oidc_client_id="vault" \ - oidc_client_secret="" \ - default_role="reader" - -vault write auth/oidc/role/reader \ - bound_audiences="vault" \ - allowed_redirect_uris="https://vault.home.local/ui/vault/auth/oidc/oidc/callback" \ - allowed_redirect_uris="https://vault.home.local/oidc/callback" \ - user_claim="sub" \ - groups_claim="groups" \ - token_policies="default" -``` - ---- - -## 5. 
Debugging auth issues - -### Check Authelia logs (real-time) +## Проверка ```bash -kubectl -n authelia logs -l app.kubernetes.io/name=authelia -f --tail=100 +kubectl -n authelia get pods +kubectl -n authelia logs -l app.kubernetes.io/name=authelia --tail=100 ``` -Common log messages: -- `"ALLOW"` — request was allowed -- `"DENY"` — request was denied (check domain in `protectedDomains`) -- `"Redirecting"` — unauthenticated user redirected to login -- `"POST /api/firstfactor"` — login attempt +## Режим Redis (встроенный или внешний) -### Verify forward-auth endpoint is reachable +Выбор задаётся переменной `authelia_redis_mode`: +- `auto` — внешний Redis при `addon_redis: true`, иначе встроенный Redis чарта; +- `internal` — всегда встроенный Redis; +- `external_redis` — всегда внешний Redis (например из addon `redis`); +- `disabled` — без Redis (сессии в памяти). -From within the cluster: -```bash -kubectl run curl-test --rm -it --image=curlimages/curl -- \ - curl -v http://authelia.authelia.svc.cluster.local:9091/api/health -# Expected: {"status":"OK"} -``` +## Официальные ресурсы -### Test that a domain's annotations are applied - -```bash -kubectl get ingress sonarr -n mediaserver -o yaml | grep auth-url -``` - -### Check current access control configuration - -```bash -kubectl -n authelia exec deploy/authelia -- \ - cat /config/configuration.yml | grep -A 30 "access_control:" -``` - -### Check active sessions (SQLite) - -```bash -kubectl -n authelia exec deploy/authelia -- \ - sqlite3 /data/db.sqlite3 "SELECT subject, ip, last_activity FROM user_opaque_identifier LIMIT 20;" -``` - -### Notification log (if SMTP disabled) - -```bash -kubectl -n authelia exec deploy/authelia -- cat /data/notification.txt -``` - ---- - -## 6. Test the login flow - -### Forward-auth flow (e.g., Sonarr) - -1. Open `http://sonarr.home.local/` in a private browser window -2. Should redirect to `http://auth.home.local/?rd=http%3A%2F%2Fsonarr.home.local%2F` -3. 
Enter credentials → should redirect back to Sonarr -4. Access granted - -Test from command line: -```bash -# Step 1: login request (expect redirect to Authelia) -curl -I http://sonarr.home.local/ -# Expected: 302 to auth.home.local - -# Step 2: verify endpoint directly -curl -I -H "X-Forwarded-Host: sonarr.home.local" \ - -H "X-Forwarded-URI: /" \ - -H "X-Forwarded-Proto: http" \ - http://authelia.authelia.svc.cluster.local:9091/api/authz/forward-auth -# Expected: 401 (unauthenticated) or 200 (if session cookie provided) -``` - -### OIDC flow (e.g., Gitea) - -1. Open `http://gitea.home.local/user/oauth2/Authelia` - (or click "Sign in with Authelia" on Gitea login page) -2. Should redirect to Authelia login form -3. Login with admin credentials -4. Authelia redirects back to Gitea with authorization code -5. Gitea exchanges code for token — user is logged in - -### Check OIDC discovery endpoint - -```bash -curl -s http://auth.home.local/.well-known/openid-configuration | jq . -# Should return JSON with issuer, authorization_endpoint, token_endpoint, etc. 
-``` - ---- - -## Variables reference - -| Variable | Default | Description | -|----------|---------|-------------| -| `authelia_host` | `auth.home.local` | Portal hostname | -| `authelia_domain` | `home.local` | Session cookie domain | -| `authelia_theme` | `dark` | UI theme | -| `authelia_two_factor_enabled` | `false` | Require TOTP/WebAuthn | -| `authelia_storage_type` | `sqlite` | `sqlite` or `postgresql` | -| `authelia_redis_enabled` | `false` | Built-in Redis for sessions | -| `authelia_smtp_enabled` | `false` | SMTP for 2FA/password-reset emails | -| `authelia_oidc_enabled` | `true` | Enable OIDC provider | -| `authelia_oidc_gitea_enabled` | `true` | Gitea OIDC client | -| `authelia_oidc_grafana_enabled` | `true` | Grafana OIDC client | -| `authelia_oidc_argocd_enabled` | `false` | ArgoCD OIDC client | -| `authelia_oidc_minio_enabled` | `false` | MinIO OIDC client | -| `authelia_oidc_vault_enabled` | `false` | Vault OIDC client | -| `authelia_ingress_tls_enabled` | `false` | TLS on auth portal | -| `authelia_protected_domains` | `[sonarr, radarr…]` | Domains requiring login | -| `authelia_admin_domains` | `[argocd, vault…]` | Admin-only domains | -| `authelia_bypass_domains` | `[]` | Public bypass domains | -| `authelia_oidc_domains` | `[gitea, grafana, minio]` | OIDC bypass (forward-auth off) | - -### Vault secrets required - -| Variable | Notes | -|----------|-------| -| `authelia_jwt_secret` | min 64 chars — `openssl rand -base64 64` | -| `authelia_session_secret` | min 64 chars | -| `authelia_storage_encryption_key` | min 20 chars — `openssl rand -base64 32` | -| `authelia_oidc_hmac_secret` | min 32 chars — `openssl rand -base64 48` | -| `authelia_oidc_private_key` | RSA PEM — leave empty, auto-generated | -| `authelia_oidc_secret_gitea` | `openssl rand -hex 32` | -| `authelia_oidc_secret_grafana` | `openssl rand -hex 32` | -| `authelia_user_admin_password_hash` | argon2id hash from `authelia hash-password` | - ---- - -## Saving the auto-generated OIDC 
private key - -After the first deploy, save the key to vault for reproducibility: - -```bash -kubectl -n authelia get secret authelia-secrets \ - -o jsonpath='{.data.oidc_private_key}' | base64 -d -``` - -Paste the PEM output into `vault.yml` as `authelia_oidc_private_key: |` (multiline YAML). +- Официальный сайт: [https://www.authelia.com/](https://www.authelia.com/) +- Официальная документация: [https://www.authelia.com/integration/kubernetes/](https://www.authelia.com/integration/kubernetes/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/authelia/authelia](https://artifacthub.io/packages/helm/authelia/authelia) diff --git a/addons/authelia/role/chart/files/configuration.yml.tpl b/addons/authelia/role/chart/files/configuration.yml.tpl index cbce870..293e68f 100644 --- a/addons/authelia/role/chart/files/configuration.yml.tpl +++ b/addons/authelia/role/chart/files/configuration.yml.tpl @@ -61,8 +61,8 @@ session: remember_me_duration: {{ .Values.session.rememberMeDuration | quote }} {{- if .Values.redis.enabled }} redis: - host: {{ printf "%s-redis" (include "authelia.name" .) | quote }} - port: 6379 + host: {{ if eq .Values.redis.mode "external" }}{{ .Values.redis.external.host | quote }}{{ else }}{{ printf "%s-redis" (include "authelia.name" .) 
| quote }}{{ end }} + port: {{ if eq .Values.redis.mode "external" }}{{ .Values.redis.external.port }}{{ else }}6379{{ end }} {{- end }} regulation: diff --git a/addons/authelia/role/chart/templates/redis.yaml b/addons/authelia/role/chart/templates/redis.yaml index 15bcd23..e6bd456 100644 --- a/addons/authelia/role/chart/templates/redis.yaml +++ b/addons/authelia/role/chart/templates/redis.yaml @@ -1,4 +1,4 @@ -{{- if .Values.redis.enabled }} +{{- if and .Values.redis.enabled (ne .Values.redis.mode "external") }} --- # Redis Deployment — session storage for Authelia (optional but recommended) apiVersion: apps/v1 diff --git a/addons/authelia/role/chart/values.yaml b/addons/authelia/role/chart/values.yaml index 348660f..d2e7dad 100644 --- a/addons/authelia/role/chart/values.yaml +++ b/addons/authelia/role/chart/values.yaml @@ -81,6 +81,10 @@ storage: # Recommended for production; not required for homelab. redis: enabled: false + mode: internal # internal | external + external: + host: "redis-master.redis.svc.cluster.local" + port: 6379 image: "redis:7-alpine" resources: requests: diff --git a/addons/authelia/role/defaults/main.yml b/addons/authelia/role/defaults/main.yml index b1f8725..ddf28c9 100644 --- a/addons/authelia/role/defaults/main.yml +++ b/addons/authelia/role/defaults/main.yml @@ -46,8 +46,18 @@ authelia_db_name: authelia authelia_db_user: authelia # ── Redis session storage (optional) ───────────────────────────────────────── -# Вынести сессии в Redis (кластерное масштабирование) +# Режим Redis для хранения сессий: +# auto — внешний Redis при addon_redis=true, иначе встроенный Redis чарта +# internal — всегда встроенный Redis чарта +# external_redis — всегда внешний Redis (например addon_redis) +# disabled — Redis отключен, сессии хранятся в памяти +authelia_redis_mode: "auto" # auto | internal | external_redis | disabled +# Вынести сессии в Redis (legacy alias; используется только при прямом переопределении) authelia_redis_enabled: false +# Хост 
внешнего Redis +authelia_redis_host: "redis-master.redis.svc.cluster.local" +# Порт внешнего Redis +authelia_redis_port: 6379 # ── SMTP notifier (optional) ────────────────────────────────────────────────── # Отправка писем (сброс пароля и т.д.) через SMTP diff --git a/addons/authelia/role/molecule/default/converge.yml b/addons/authelia/role/molecule/default/converge.yml index f3e1940..2c3c745 100644 --- a/addons/authelia/role/molecule/default/converge.yml +++ b/addons/authelia/role/molecule/default/converge.yml @@ -21,7 +21,9 @@ authelia_db_port: 5432 authelia_db_name: authelia authelia_db_user: authelia - authelia_redis_enabled: false + authelia_redis_mode: "disabled" + authelia_redis_host: "redis-master.redis.svc.cluster.local" + authelia_redis_port: 6379 authelia_smtp_enabled: false authelia_smtp_host: "" authelia_smtp_port: 587 diff --git a/addons/authelia/role/tasks/main.yml b/addons/authelia/role/tasks/main.yml index 09a5034..f554c58 100644 --- a/addons/authelia/role/tasks/main.yml +++ b/addons/authelia/role/tasks/main.yml @@ -185,7 +185,7 @@ - " Namespace : {{ authelia_namespace }}" - " OIDC : {{ 'enabled' if authelia_oidc_enabled else 'disabled' }}" - " Storage : {{ authelia_storage_type }}" - - " Redis : {{ 'enabled' if authelia_redis_enabled else 'disabled' }}" + - " Redis : {{ authelia_redis_mode }}" - "" - " ── Protect a new service (add to its Ingress) ──" - "{{ _annotations.stdout_lines | to_yaml }}" diff --git a/addons/authelia/role/templates/values.yaml.j2 b/addons/authelia/role/templates/values.yaml.j2 index 7d2f873..d71eb3c 100644 --- a/addons/authelia/role/templates/values.yaml.j2 +++ b/addons/authelia/role/templates/values.yaml.j2 @@ -57,7 +57,23 @@ storage: schema: public redis: - enabled: {{ authelia_redis_enabled | string | lower }} + enabled: >- + {{ + ( + authelia_redis_mode != 'disabled' + ) | string | lower + }} + mode: >- + {{ + 'external' + if (authelia_redis_mode == 'external_redis' + or (authelia_redis_mode == 'auto' and (addon_redis 
| default(false) | bool)) + ) + else 'internal' + }} + external: + host: {{ authelia_redis_host | quote }} + port: {{ authelia_redis_port }} notifier: smtp: diff --git a/addons/ceph-rock/README.md b/addons/ceph-rock/README.md new file mode 100644 index 0000000..fa0d3ff --- /dev/null +++ b/addons/ceph-rock/README.md @@ -0,0 +1,109 @@ +# Ceph-Rock / Rook-Ceph + +Distributed storage на базе Ceph, управляемый Rook-оператором. Предоставляет: +- **Block storage** (RWO) — `rook-ceph-block` StorageClass +- **Filesystem storage** (RWX) — `rook-ceph-filesystem` StorageClass + +Требует минимум **3 ноды** с незанятыми дисками для OSD. + +## Быстрый старт + +```yaml +# group_vars/all/addons.yml +addon_ceph_rock: true +``` + +```bash +make addon-ceph-rock +``` + +## Параметры + +| Переменная | Умолч. | Описание | +|---|---|---| +| `rook_ceph_mon_count` | `3` | Количество MON | +| `rook_ceph_block_replica_count` | `3` | Реплики блочного хранилища | +| `rook_ceph_devices` | `[]` | Список raw-устройств для OSD | +| `rook_ceph_use_all_devices` | `false` | Авто-использовать все свободные диски | +| `rook_ceph_block_storage_class` | `rook-ceph-block` | Имя StorageClass (RWO) | +| `rook_ceph_filesystem_storage_class` | `rook-ceph-filesystem` | Имя StorageClass (RWX) | + +## Single-node конфигурация + +```yaml +rook_ceph_mon_count: 1 +rook_ceph_allow_multiple_mon_per_node: true +rook_ceph_block_replica_count: 1 +``` + +## Использование конкретных дисков + +```yaml +rook_ceph_devices: + - "/dev/sdb" + - "/dev/sdc" +``` + +## Использование в PVC + +### Block storage (RWO) + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: my-db-data +spec: + accessModes: [ReadWriteOnce] + storageClassName: rook-ceph-block + resources: + requests: + storage: 20Gi +``` + +### Filesystem storage (RWX) + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: shared-data +spec: + accessModes: [ReadWriteMany] + storageClassName: rook-ceph-filesystem + resources: + 
requests: + storage: 50Gi +``` + +## Dashboard + +Включён по умолчанию. Доступ: + +```bash +kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 7000 +# http://localhost:7000 +# Логин: admin +kubectl -n rook-ceph get secret rook-ceph-dashboard-password \ + -o jsonpath='{.data.password}' | base64 -d +``` + +Или через Ingress: +```yaml +rook_ceph_dashboard_ingress_enabled: true +rook_ceph_dashboard_ingress_host: "ceph.example.com" +``` + +## Диагностика + +```bash +kubectl -n rook-ceph get cephcluster +kubectl -n rook-ceph get pods +kubectl exec -n rook-ceph deployment/rook-ceph-tools -- ceph status +kubectl exec -n rook-ceph deployment/rook-ceph-tools -- ceph osd status +``` +## Официальные ресурсы + +- Официальный сайт: [https://rook.io/](https://rook.io/) +- Официальная документация: [https://rook.io/docs/rook/latest-release/Storage-Configuration/Ceph-CSI/ceph-csi-drivers/](https://rook.io/docs/rook/latest-release/Storage-Configuration/Ceph-CSI/ceph-csi-drivers/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/rook-release/rook-ceph](https://artifacthub.io/packages/helm/rook-release/rook-ceph) diff --git a/addons/ceph-rock/playbook.yml b/addons/ceph-rock/playbook.yml new file mode 100644 index 0000000..1447679 --- /dev/null +++ b/addons/ceph-rock/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install Ceph-Rock + hosts: k3s_master[0] + gather_facts: false + become: true + roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/ceph-rock/role/defaults/main.yml b/addons/ceph-rock/role/defaults/main.yml new file mode 100644 index 0000000..677c6d2 --- /dev/null +++ b/addons/ceph-rock/role/defaults/main.yml @@ -0,0 +1,66 @@ +--- +# Версия Helm-чарта Rook +rook_ceph_version: "1.14.0" +# Namespace Rook/Ceph +rook_ceph_namespace: "rook-ceph" +# Helm-репозиторий Rook +rook_ceph_chart_repo: "https://charts.rook.io/release" + +# Версия образа Ceph +# Образ Ceph daemon +rook_ceph_image: "quay.io/ceph/ceph:v18.2.2" + +# Мониторы (MON) — для 
single-node: count=1, allowMultiplePerNode=true +# Количество MON +rook_ceph_mon_count: 3 +# Разрешить несколько MON на одной ноде (lab) +rook_ceph_allow_multiple_mon_per_node: false + +# Путь на хосте для хранения данных Ceph (OSD в filestore-режиме) +# Для использования raw block-устройств — оставь пустым и задай rook_ceph_devices +# Каталог данных на нодах (directory store) +rook_ceph_data_dir: "/var/lib/rook" + +# Raw block-устройства для OSD (например ["/dev/sdb", "/dev/sdc"]) +# Если пусто — Ceph использует директорию rook_ceph_data_dir +# Список блочных устройств для OSD +rook_ceph_devices: [] + +# Использовать все доступные (не смонтированные) диски автоматически +rook_ceph_use_all_devices: false + +# Dashboard +# Включить Ceph Dashboard +rook_ceph_dashboard_enabled: true + +# StorageClasses +# Имя block pool +rook_ceph_block_pool_name: "replicapool" +# Число реплик в пуле +rook_ceph_block_replica_count: 3 # уменьши до 1 для single-node +# Имя StorageClass для RBD +rook_ceph_block_storage_class: "rook-ceph-block" +# Сделать RBD StorageClass default +rook_ceph_block_storage_class_default: false + +# Имя CephFS +rook_ceph_filesystem_name: "ceph-filesystem" +# Имя StorageClass для CephFS +rook_ceph_filesystem_storage_class: "rook-ceph-filesystem" + +# Ingress для Ceph Dashboard +# Публиковать Ceph Dashboard через Ingress +rook_ceph_dashboard_ingress_enabled: false +# Хост Dashboard +rook_ceph_dashboard_ingress_host: "ceph.local" +# IngressClass +rook_ceph_dashboard_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}" +# TLS +rook_ceph_dashboard_ingress_tls: false +# Issuer cert-manager +rook_ceph_dashboard_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}" + +# Метрики +# Экспорт метрик Ceph для Prometheus +rook_ceph_metrics_enabled: true +# ServiceMonitor создаётся только когда addon_prometheus_stack: true diff --git a/addons/ceph-rock/role/molecule/default/converge.yml 
b/addons/ceph-rock/role/molecule/default/converge.yml new file mode 100644 index 0000000..be1b982 --- /dev/null +++ b/addons/ceph-rock/role/molecule/default/converge.yml @@ -0,0 +1,30 @@ +--- +- name: Converge — ceph-rock (rook-ceph) template tests + hosts: all + become: false + gather_facts: false + + vars: + rook_ceph_namespace: rook-ceph + rook_ceph_image: "quay.io/ceph/ceph:v18.2.2" + rook_ceph_mon_count: 3 + rook_ceph_allow_multiple_mon_per_node: false + rook_ceph_data_dir: "/var/lib/rook" + rook_ceph_devices: [] + rook_ceph_use_all_devices: false + rook_ceph_dashboard_enabled: true + rook_ceph_block_pool_name: "replicapool" + rook_ceph_block_replica_count: 3 + rook_ceph_block_storage_class: "ceph-block" + rook_ceph_block_storage_class_default: false + rook_ceph_filesystem_name: "ceph-filesystem" + rook_ceph_filesystem_storage_class: "ceph-filesystem" + rook_ceph_metrics_enabled: false + addon_prometheus_stack: false + + tasks: + - name: Render ceph-cluster.yaml.j2 + ansible.builtin.template: + src: "{{ playbook_dir }}/../../templates/ceph-cluster.yaml.j2" + dest: /tmp/ceph-cluster.yaml + mode: "0644" diff --git a/addons/ceph-rock/role/molecule/default/molecule.yml b/addons/ceph-rock/role/molecule/default/molecule.yml new file mode 100644 index 0000000..c64b633 --- /dev/null +++ b/addons/ceph-rock/role/molecule/default/molecule.yml @@ -0,0 +1,28 @@ +--- +driver: + name: docker + +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true + groups: + - k3s_master + +provisioner: + name: ansible + playbooks: + converge: converge.yml + verify: verify.yml + config_options: + defaults: + interpreter_python: auto_silent + +verifier: + name: ansible + +lint: | + set -e + yamllint . 
+ ansible-lint + diff --git a/addons/ceph-rock/role/molecule/default/verify.yml b/addons/ceph-rock/role/molecule/default/verify.yml new file mode 100644 index 0000000..78c83f0 --- /dev/null +++ b/addons/ceph-rock/role/molecule/default/verify.yml @@ -0,0 +1,50 @@ +--- +- name: Verify — ceph-rock templates + hosts: all + become: false + gather_facts: false + + tasks: + - name: Read rendered ceph-cluster manifest + ansible.builtin.slurp: + src: /tmp/ceph-cluster.yaml + register: manifest_raw + + - name: Set content fact + ansible.builtin.set_fact: + content: "{{ manifest_raw.content | b64decode }}" + + - name: Assert CephCluster kind is present + ansible.builtin.assert: + that: "'kind: CephCluster' in content" + fail_msg: "kind: CephCluster не найден" + + - name: Assert CephBlockPool kind is present + ansible.builtin.assert: + that: "'kind: CephBlockPool' in content" + fail_msg: "kind: CephBlockPool не найден" + + - name: Assert StorageClass is present (for RBD) + ansible.builtin.assert: + that: "'kind: StorageClass' in content" + fail_msg: "kind: StorageClass не найден" + + - name: Assert CephFilesystem kind is present + ansible.builtin.assert: + that: "'kind: CephFilesystem' in content" + fail_msg: "kind: CephFilesystem не найден" + + - name: Assert mon count is 3 + ansible.builtin.assert: + that: "'count: 3' in content" + fail_msg: "mon count: 3 не найден" + + - name: Assert namespace is rook-ceph + ansible.builtin.assert: + that: "'namespace: rook-ceph' in content" + fail_msg: "namespace: rook-ceph не найден" + + - name: Assert ceph image is set + ansible.builtin.assert: + that: "'quay.io/ceph/ceph' in content" + fail_msg: "ceph image не найден" diff --git a/addons/ceph-rock/role/tasks/main.yml b/addons/ceph-rock/role/tasks/main.yml new file mode 100644 index 0000000..8e1cbe2 --- /dev/null +++ b/addons/ceph-rock/role/tasks/main.yml @@ -0,0 +1,98 @@ +--- +- name: Add Rook Helm repo + kubernetes.core.helm_repository: + name: rook-release + repo_url: "{{ 
rook_ceph_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Install Rook-Ceph operator via Helm + kubernetes.core.helm: + name: rook-ceph + chart_ref: rook-release/rook-ceph + chart_version: "{{ rook_ceph_version }}" + release_namespace: "{{ rook_ceph_namespace }}" + create_namespace: true + wait: true + timeout: "10m0s" + values: + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + discover: + tolerations: + - operator: "Exists" + csi: + provisionerTolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + pluginTolerations: + - operator: "Exists" + monitoring: + enabled: "{{ rook_ceph_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Wait for Rook operator to be ready + ansible.builtin.command: > + k3s kubectl -n {{ rook_ceph_namespace }} + rollout status deployment/rook-ceph-operator --timeout=180s + changed_when: false + retries: 3 + delay: 10 + +- name: Template CephCluster + StorageClasses manifest + ansible.builtin.template: + src: ceph-cluster.yaml.j2 + dest: /tmp/ceph-cluster.yaml + mode: '0644' + +- name: Apply CephCluster + StorageClasses + ansible.builtin.command: k3s kubectl apply -f /tmp/ceph-cluster.yaml + changed_when: true + +- name: Wait for Ceph monitors to be ready (может занять несколько минут) + ansible.builtin.command: > + k3s kubectl -n {{ rook_ceph_namespace }} + wait cephcluster/rook-ceph + --for=jsonpath='{.status.phase}'=Ready + --timeout=600s + changed_when: false + retries: 5 + delay: 30 + failed_when: false + +- name: Create Ceph Dashboard Ingress + ansible.builtin.template: + src: ceph-dashboard-ingress.yaml.j2 + dest: /tmp/ceph-dashboard-ingress.yaml + mode: '0644' + when: rook_ceph_dashboard_ingress_enabled | bool + +- name: Apply Ceph Dashboard Ingress + ansible.builtin.command: k3s kubectl apply -f 
/tmp/ceph-dashboard-ingress.yaml + changed_when: true + when: rook_ceph_dashboard_ingress_enabled | bool + +- name: Get Ceph Dashboard admin password + ansible.builtin.command: > + k3s kubectl -n {{ rook_ceph_namespace }} + get secret rook-ceph-dashboard-password + -o jsonpath='{.data.password}' + register: _ceph_dashboard_password + changed_when: false + failed_when: false + +- name: Show Rook-Ceph access info + ansible.builtin.debug: + msg: + - "Rook-Ceph установлен в namespace: {{ rook_ceph_namespace }}" + - "StorageClass (block RWO): {{ rook_ceph_block_storage_class }}" + - "StorageClass (filesystem RWX): {{ rook_ceph_filesystem_storage_class }}" + - "Replicas: {{ rook_ceph_block_replica_count }} (для single-node задай rook_ceph_block_replica_count=1)" + - "{% if rook_ceph_dashboard_ingress_enabled %}Dashboard: http{{ 's' if rook_ceph_dashboard_ingress_tls else '' }}://{{ rook_ceph_dashboard_ingress_host }}{% else %}Dashboard: kubectl port-forward svc/rook-ceph-mgr-dashboard -n {{ rook_ceph_namespace }} 7000:7000{% endif %}" + - "Dashboard логин: admin / {{ _ceph_dashboard_password.stdout | b64decode if _ceph_dashboard_password.rc == 0 else '(пока создаётся)' }}" + - "Статус кластера: kubectl -n {{ rook_ceph_namespace }} get cephcluster" + - "Toolbox: kubectl -n {{ rook_ceph_namespace }} exec -it deploy/rook-ceph-tools -- bash" diff --git a/addons/ceph-rock/role/templates/ceph-cluster.yaml.j2 b/addons/ceph-rock/role/templates/ceph-cluster.yaml.j2 new file mode 100644 index 0000000..bf08455 --- /dev/null +++ b/addons/ceph-rock/role/templates/ceph-cluster.yaml.j2 @@ -0,0 +1,127 @@ +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: {{ rook_ceph_namespace }} +spec: + cephVersion: + image: "{{ rook_ceph_image }}" + allowUnsupported: false + + dataDirHostPath: "{{ rook_ceph_data_dir }}" + + mon: + count: {{ rook_ceph_mon_count }} + allowMultiplePerNode: {{ rook_ceph_allow_multiple_mon_per_node | lower }} + + mgr: + count: 1 
+ modules: + - name: pg_autoscaler + enabled: true + + dashboard: + enabled: {{ rook_ceph_dashboard_enabled | lower }} + ssl: false + + monitoring: + enabled: {{ (rook_ceph_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool) | lower }} + + network: + connections: + encryption: + enabled: false + compression: + enabled: false + + storage: + useAllNodes: true + useAllDevices: {{ rook_ceph_use_all_devices | lower }} +{% if rook_ceph_devices %} + devices: +{% for dev in rook_ceph_devices %} + - name: "{{ dev }}" +{% endfor %} +{% else %} + directories: + - path: "{{ rook_ceph_data_dir }}" +{% endif %} + + placement: + all: + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + + disruptionManagement: + managePodBudgets: true + osdMaintenanceTimeout: 30 +--- +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: {{ rook_ceph_block_pool_name }} + namespace: {{ rook_ceph_namespace }} +spec: + failureDomain: host + replicated: + size: {{ rook_ceph_block_replica_count }} +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ rook_ceph_block_storage_class }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ rook_ceph_block_storage_class_default | lower }}" +provisioner: {{ rook_ceph_namespace }}.rbd.csi.ceph.com +parameters: + clusterID: {{ rook_ceph_namespace }} + pool: {{ rook_ceph_block_pool_name }} + imageFormat: "2" + imageFeatures: layering + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_ceph_namespace }} + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: {{ rook_ceph_namespace }} + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_ceph_namespace }} +reclaimPolicy: Delete +allowVolumeExpansion: true +--- 
+apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: {{ rook_ceph_filesystem_name }} + namespace: {{ rook_ceph_namespace }} +spec: + metadataPool: + replicated: + size: {{ rook_ceph_block_replica_count }} + dataPools: + - name: data0 + replicated: + size: {{ rook_ceph_block_replica_count }} + preserveFilesystemOnDelete: true + metadataServer: + activeCount: 1 + activeStandby: true +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ rook_ceph_filesystem_storage_class }} +provisioner: {{ rook_ceph_namespace }}.cephfs.csi.ceph.com +parameters: + clusterID: {{ rook_ceph_namespace }} + fsName: {{ rook_ceph_filesystem_name }} + pool: {{ rook_ceph_filesystem_name }}-data0 + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_ceph_namespace }} + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: {{ rook_ceph_namespace }} + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_ceph_namespace }} +reclaimPolicy: Delete +allowVolumeExpansion: true diff --git a/addons/ceph-rock/role/templates/ceph-dashboard-ingress.yaml.j2 b/addons/ceph-rock/role/templates/ceph-dashboard-ingress.yaml.j2 new file mode 100644 index 0000000..46d3e0d --- /dev/null +++ b/addons/ceph-rock/role/templates/ceph-dashboard-ingress.yaml.j2 @@ -0,0 +1,29 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: rook-ceph-mgr-dashboard + namespace: {{ rook_ceph_namespace }} + annotations: + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" +{% if rook_ceph_dashboard_ingress_tls %} + cert-manager.io/cluster-issuer: "{{ rook_ceph_dashboard_ingress_cert_issuer }}" +{% endif %} +spec: + ingressClassName: "{{ rook_ceph_dashboard_ingress_class }}" + rules: + - host: "{{ rook_ceph_dashboard_ingress_host }}" + http: + paths: + - 
path: / + pathType: Prefix + backend: + service: + name: rook-ceph-mgr-dashboard + port: + number: 7000 +{% if rook_ceph_dashboard_ingress_tls %} + tls: + - secretName: ceph-dashboard-tls + hosts: + - "{{ rook_ceph_dashboard_ingress_host }}" +{% endif %} diff --git a/addons/cert-manager/README.md b/addons/cert-manager/README.md index 71f554b..f12a99a 100644 --- a/addons/cert-manager/README.md +++ b/addons/cert-manager/README.md @@ -121,3 +121,8 @@ kubectl logs -n cert-manager deployment/cert-manager -f kubectl get secret my-app-tls -n my-app -o jsonpath='{.data.tls\.crt}' | \ base64 -d | openssl x509 -noout -dates ``` +## Официальные ресурсы + +- Официальный сайт: [https://cert-manager.io/](https://cert-manager.io/) +- Официальная документация: [https://cert-manager.io/docs/](https://cert-manager.io/docs/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/cert-manager/cert-manager](https://artifacthub.io/packages/helm/cert-manager/cert-manager) diff --git a/addons/crowdsec/README.md b/addons/crowdsec/README.md index 928d430..1451901 100644 --- a/addons/crowdsec/README.md +++ b/addons/crowdsec/README.md @@ -75,3 +75,8 @@ crowdsec_acquisition: podName: "gitea-*" program: gitea ``` +## Официальные ресурсы + +- Официальный сайт: [https://www.crowdsec.net/](https://www.crowdsec.net/) +- Официальная документация: [https://docs.crowdsec.net/](https://docs.crowdsec.net/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/crowdsec/crowdsec](https://artifacthub.io/packages/helm/crowdsec/crowdsec) diff --git a/addons/csi-ceph/README.md b/addons/csi-ceph/README.md index 219d798..6e863de 100644 --- a/addons/csi-ceph/README.md +++ b/addons/csi-ceph/README.md @@ -1,104 +1,175 @@ -# CSI Ceph / Rook-Ceph +# CSI Ceph (ceph-csi: RBD + CephFS) -Distributed storage на базе Ceph, управляемый Rook-оператором. 
Предоставляет: -- **Block storage** (RWO) — `rook-ceph-block` StorageClass -- **Filesystem storage** (RWX) — `rook-ceph-filesystem` StorageClass +Аддон разворачивает именно официальный **Ceph Container Storage Interface driver** +из репозитория [ceph/ceph-csi](https://github.com/ceph/ceph-csi/tree/devel): -Требует минимум **3 ноды** с незанятыми дисками для OSD. +- `rbd.csi.ceph.com` — блочное хранилище RBD (обычно `ReadWriteOnce`) +- `cephfs.csi.ceph.com` — файловое хранилище CephFS (обычно `ReadWriteMany`) -## Быстрый старт +## Что делает аддон + +1. Создаёт `ConfigMap ceph-csi-config` (FSID + MON endpoint’ы). +2. Применяет upstream-манифесты `ceph-csi` для RBD/CephFS (provisioner + node plugin). +3. Создаёт `Secret` для авторизации в Ceph. +4. Создаёт `StorageClass` для RBD и/или CephFS. + +Аддон **не** разворачивает Ceph-кластер (MON/OSD/MGR). Кластер Ceph должен уже существовать. + +--- + +## Предварительные требования + +- Рабочий Ceph-кластер. +- Доступ к `ceph` CLI (чтобы получить FSID, MON и CephX ключ). +- Kubernetes-кластер уже установлен (k3s). + +Нужно подготовить: + +- `clusterID` = FSID (`ceph fsid`) +- список `monitors` (`host:port`) +- CephX пользователь и ключ для CSI +- pool для RBD и (опционально) fs/pool для CephFS + +--- + +## Подготовка в Ceph (пример) + +### 1) Получить FSID + +```bash +ceph fsid +``` + +### 2) Получить список MON + +```bash +ceph mon dump +``` + +Возьми адреса в формате `IP:6789` (или ваш порт MON). + +### 3) Создать пользователя для CSI + +```bash +# пример для RBD + CephFS +ceph auth get-or-create client.kubernetes \ + mon 'allow r' \ + mgr 'allow rw' \ + osd 'allow rwx pool=rbd, allow rwx pool=cephfs_data, allow rw tag cephfs *=*' \ + mds 'allow rw' +``` + +### 4) Вытащить ключ пользователя + +```bash +ceph auth get-key client.kubernetes +``` + +Сохрани его в `vault_csi_ceph_user_key`. 
+ +--- + +## Настройка в проекте + +### `group_vars/all/addons.yml` ```yaml -# group_vars/all/addons.yml addon_csi_ceph: true + +csi_ceph_driver_ref: "devel" +csi_ceph_namespace: "kube-system" + +csi_ceph_cluster_id: "b9127830-b0cc-4e34-aa47-9d1a2e9949a8" +csi_ceph_monitors: + - "10.0.0.11:6789" + - "10.0.0.12:6789" + - "10.0.0.13:6789" + +csi_ceph_user_id: "kubernetes" +csi_ceph_user_key: "{{ vault_csi_ceph_user_key }}" + +csi_ceph_rbd_storage_class_name: "ceph-rbd" +csi_ceph_rbd_pool: "rbd" +csi_ceph_rbd_fs_type: "ext4" + +csi_ceph_cephfs_storage_class_name: "cephfs" +csi_ceph_cephfs_fs_name: "cephfs" +csi_ceph_cephfs_pool: "cephfs_data" ``` +### `group_vars/all/vault.yml` + +```yaml +vault_csi_ceph_user_key: "" +``` + +--- + +## Установка + ```bash make addon-csi-ceph ``` -## Параметры +Проверка: -| Переменная | Умолч. | Описание | -|---|---|---| -| `rook_ceph_mon_count` | `3` | Количество MON | -| `rook_ceph_block_replica_count` | `3` | Реплики блочного хранилища | -| `rook_ceph_devices` | `[]` | Список raw-устройств для OSD | -| `rook_ceph_use_all_devices` | `false` | Авто-использовать все свободные диски | -| `rook_ceph_block_storage_class` | `rook-ceph-block` | Имя StorageClass (RWO) | -| `rook_ceph_filesystem_storage_class` | `rook-ceph-filesystem` | Имя StorageClass (RWX) | - -## Single-node конфигурация - -```yaml -rook_ceph_mon_count: 1 -rook_ceph_allow_multiple_mon_per_node: true -rook_ceph_block_replica_count: 1 +```bash +kubectl -n kube-system get pods | rg 'csi-rbd|csi-cephfs' +kubectl get sc ``` -## Использование конкретных дисков +Ожидается наличие `StorageClass`: -```yaml -rook_ceph_devices: - - "/dev/sdb" - - "/dev/sdc" -``` +- `ceph-rbd` (или имя из `csi_ceph_rbd_storage_class_name`) +- `cephfs` (или имя из `csi_ceph_cephfs_storage_class_name`) -## Использование в PVC +--- -### Block storage (RWO) +## Пример PVC (RBD) ```yaml apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: my-db-data + name: app-rbd-data spec: accessModes: 
[ReadWriteOnce] - storageClassName: rook-ceph-block + storageClassName: ceph-rbd resources: requests: storage: 20Gi ``` -### Filesystem storage (RWX) +## Пример PVC (CephFS) ```yaml apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: shared-data + name: app-cephfs-data spec: accessModes: [ReadWriteMany] - storageClassName: rook-ceph-filesystem + storageClassName: cephfs resources: requests: - storage: 50Gi + storage: 20Gi ``` -## Dashboard +--- -Включён по умолчанию. Доступ: +## Частые проблемы -```bash -kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 7000 -# http://localhost:7000 -# Логин: admin -kubectl -n rook-ceph get secret rook-ceph-dashboard-password \ - -o jsonpath='{.data.password}' | base64 -d -``` +- `failed to fetch monitor list` + Неверный `csi_ceph_cluster_id` или `csi_ceph_monitors`. -Или через Ingress: -```yaml -rook_ceph_dashboard_ingress_enabled: true -rook_ceph_dashboard_ingress_host: "ceph.example.com" -``` +- `permission denied` / `rados: ret=-13` + Недостаточные CephX права у `client.`. -## Диагностика +- PVC в `Pending` + Проверь provisioner pods (`csi-rbdplugin-provisioner`, `csi-cephfsplugin-provisioner`) и события PVC: + `kubectl describe pvc `. 
+## Официальные ресурсы -```bash -kubectl -n rook-ceph get cephcluster -kubectl -n rook-ceph get pods -kubectl exec -n rook-ceph deployment/rook-ceph-tools -- ceph status -kubectl exec -n rook-ceph deployment/rook-ceph-tools -- ceph osd status -``` +- Официальный сайт: [https://ceph.io/](https://ceph.io/) +- Официальная документация: [https://github.com/ceph/ceph-csi/tree/devel](https://github.com/ceph/ceph-csi/tree/devel) +- Версии Helm chart / ПО: [https://github.com/ceph/ceph-csi/releases](https://github.com/ceph/ceph-csi/releases) diff --git a/addons/csi-ceph/playbook.yml b/addons/csi-ceph/playbook.yml index 328cd5b..e923294 100644 --- a/addons/csi-ceph/playbook.yml +++ b/addons/csi-ceph/playbook.yml @@ -1,5 +1,5 @@ --- -- name: Install Csi Ceph +- name: Install CSI Ceph hosts: k3s_master[0] gather_facts: false become: true diff --git a/addons/csi-ceph/role/defaults/main.yml b/addons/csi-ceph/role/defaults/main.yml index 677c6d2..bb0f27e 100644 --- a/addons/csi-ceph/role/defaults/main.yml +++ b/addons/csi-ceph/role/defaults/main.yml @@ -1,66 +1,42 @@ --- -# Версия Helm-чарта Rook -rook_ceph_version: "1.14.0" -# Namespace Rook/Ceph -rook_ceph_namespace: "rook-ceph" -# Helm-репозиторий Rook -rook_ceph_chart_repo: "https://charts.rook.io/release" +# Версия Ceph CSI driver (ветка/тег репозитория ceph/ceph-csi) +csi_ceph_driver_ref: "devel" -# Версия образа Ceph -# Образ Ceph daemon -rook_ceph_image: "quay.io/ceph/ceph:v18.2.2" +# Namespace, где разворачиваются CSI контроллеры/DaemonSet +csi_ceph_namespace: "kube-system" -# Мониторы (MON) — для single-node: count=1, allowMultiplePerNode=true -# Количество MON -rook_ceph_mon_count: 3 -# Разрешить несколько MON на одной ноде (lab) -rook_ceph_allow_multiple_mon_per_node: false +# FSID Ceph кластера (clusterID для ceph-csi) +csi_ceph_cluster_id: "" -# Путь на хосте для хранения данных Ceph (OSD в filestore-режиме) -# Для использования raw block-устройств — оставь пустым и задай rook_ceph_devices -# Каталог данных на 
нодах (directory store) -rook_ceph_data_dir: "/var/lib/rook" +# Список мониторов Ceph MON в формате host:port +csi_ceph_monitors: [] +# Пример: +# csi_ceph_monitors: +# - "10.0.0.11:6789" +# - "10.0.0.12:6789" +# - "10.0.0.13:6789" -# Raw block-устройства для OSD (например ["/dev/sdb", "/dev/sdc"]) -# Если пусто — Ceph использует директорию rook_ceph_data_dir -# Список блочных устройств для OSD -rook_ceph_devices: [] +# Ceph user для CSI (обычно user с правами на pool/fs) +csi_ceph_user_id: "kubernetes" +# Ключ пользователя (ceph auth get-key client.) +csi_ceph_user_key: "{{ vault_csi_ceph_user_key | default('') }}" -# Использовать все доступные (не смонтированные) диски автоматически -rook_ceph_use_all_devices: false +# RBD StorageClass +csi_ceph_rbd_storage_class_name: "ceph-rbd" +csi_ceph_rbd_pool: "rbd" +csi_ceph_rbd_fs_type: "ext4" +csi_ceph_rbd_storage_class_default: false -# Dashboard -# Включить Ceph Dashboard -rook_ceph_dashboard_enabled: true +# CephFS StorageClass +csi_ceph_cephfs_storage_class_name: "cephfs" +csi_ceph_cephfs_fs_name: "cephfs" +csi_ceph_cephfs_pool: "cephfs_data" +csi_ceph_cephfs_storage_class_default: false -# StorageClasses -# Имя block pool -rook_ceph_block_pool_name: "replicapool" -# Число реплик в пуле -rook_ceph_block_replica_count: 3 # уменьши до 1 для single-node -# Имя StorageClass для RBD -rook_ceph_block_storage_class: "rook-ceph-block" -# Сделать RBD StorageClass default -rook_ceph_block_storage_class_default: false +# Общие параметры StorageClass +csi_ceph_reclaim_policy: "Delete" # Delete | Retain +csi_ceph_volume_binding_mode: "Immediate" # Immediate | WaitForFirstConsumer -# Имя CephFS -rook_ceph_filesystem_name: "ceph-filesystem" -# Имя StorageClass для CephFS -rook_ceph_filesystem_storage_class: "rook-ceph-filesystem" - -# Ingress для Ceph Dashboard -# Публиковать Ceph Dashboard через Ingress -rook_ceph_dashboard_ingress_enabled: false -# Хост Dashboard -rook_ceph_dashboard_ingress_host: "ceph.local" -# IngressClass 
-rook_ceph_dashboard_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}" -# TLS -rook_ceph_dashboard_ingress_tls: false -# Issuer cert-manager -rook_ceph_dashboard_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}" - -# Метрики -# Экспорт метрик Ceph для Prometheus -rook_ceph_metrics_enabled: true -# ServiceMonitor создаётся только когда addon_prometheus_stack: true +# Включить/выключить установку конкретного драйвера +csi_ceph_enable_rbd: true +csi_ceph_enable_cephfs: true diff --git a/addons/csi-ceph/role/molecule/default/converge.yml b/addons/csi-ceph/role/molecule/default/converge.yml index bfedad2..6a89e84 100644 --- a/addons/csi-ceph/role/molecule/default/converge.yml +++ b/addons/csi-ceph/role/molecule/default/converge.yml @@ -1,30 +1,40 @@ --- -- name: Converge — csi-ceph (rook-ceph) template tests +- name: Converge — csi-ceph (ceph-csi) template tests hosts: all become: false gather_facts: false vars: - rook_ceph_namespace: rook-ceph - rook_ceph_image: "quay.io/ceph/ceph:v18.2.2" - rook_ceph_mon_count: 3 - rook_ceph_allow_multiple_mon_per_node: false - rook_ceph_data_dir: "/var/lib/rook" - rook_ceph_devices: [] - rook_ceph_use_all_devices: false - rook_ceph_dashboard_enabled: true - rook_ceph_block_pool_name: "replicapool" - rook_ceph_block_replica_count: 3 - rook_ceph_block_storage_class: "ceph-block" - rook_ceph_block_storage_class_default: false - rook_ceph_filesystem_name: "ceph-filesystem" - rook_ceph_filesystem_storage_class: "ceph-filesystem" - rook_ceph_metrics_enabled: false - addon_prometheus_stack: false + csi_ceph_namespace: "kube-system" + csi_ceph_cluster_id: "b9127830-b0cc-4e34-aa47-9d1a2e9949a8" + csi_ceph_monitors: + - "10.0.0.11:6789" + - "10.0.0.12:6789" + - "10.0.0.13:6789" + csi_ceph_user_id: "kubernetes" + csi_ceph_user_key: "test-ceph-key" + csi_ceph_rbd_storage_class_name: "ceph-rbd" + csi_ceph_rbd_pool: "rbd" + csi_ceph_rbd_fs_type: "ext4" + csi_ceph_rbd_storage_class_default: 
false + csi_ceph_cephfs_storage_class_name: "cephfs" + csi_ceph_cephfs_fs_name: "cephfs" + csi_ceph_cephfs_pool: "cephfs_data" + csi_ceph_cephfs_storage_class_default: false + csi_ceph_reclaim_policy: "Delete" + csi_ceph_volume_binding_mode: "Immediate" + csi_ceph_enable_rbd: true + csi_ceph_enable_cephfs: true tasks: - - name: Render ceph-cluster.yaml.j2 + - name: Render csi-config-map.yaml.j2 ansible.builtin.template: - src: "{{ playbook_dir }}/../../templates/ceph-cluster.yaml.j2" - dest: /tmp/ceph-cluster.yaml + src: "{{ playbook_dir }}/../../templates/csi-config-map.yaml.j2" + dest: /tmp/csi-ceph-config-map.yaml + mode: "0644" + + - name: Render csi-ceph-secrets-and-sc.yaml.j2 + ansible.builtin.template: + src: "{{ playbook_dir }}/../../templates/csi-ceph-secrets-and-sc.yaml.j2" + dest: /tmp/csi-ceph-resources.yaml mode: "0644" diff --git a/addons/csi-ceph/role/molecule/default/verify.yml b/addons/csi-ceph/role/molecule/default/verify.yml index 9c159e1..9281973 100644 --- a/addons/csi-ceph/role/molecule/default/verify.yml +++ b/addons/csi-ceph/role/molecule/default/verify.yml @@ -5,46 +5,39 @@ gather_facts: false tasks: - - name: Read rendered ceph-cluster manifest + - name: Read rendered config map manifest ansible.builtin.slurp: - src: /tmp/ceph-cluster.yaml - register: manifest_raw + src: /tmp/csi-ceph-config-map.yaml + register: cfg_raw - - name: Set content fact + - name: Set config content fact ansible.builtin.set_fact: - content: "{{ manifest_raw.content | b64decode }}" + cfg: "{{ cfg_raw.content | b64decode }}" - - name: Assert CephCluster kind is present + - name: Assert ceph-csi config map and monitors are present ansible.builtin.assert: - that: "'kind: CephCluster' in content" - fail_msg: "kind: CephCluster не найден" + that: + - "'name: ceph-csi-config' in cfg" + - "'clusterID' in cfg" + - "'10.0.0.11:6789' in cfg" + fail_msg: "ConfigMap ceph-csi-config не отрендерен корректно" - - name: Assert CephBlockPool kind is present - 
ansible.builtin.assert: - that: "'kind: CephBlockPool' in content" - fail_msg: "kind: CephBlockPool не найден" + - name: Read rendered secrets/sc manifest + ansible.builtin.slurp: + src: /tmp/csi-ceph-resources.yaml + register: res_raw - - name: Assert StorageClass is present (for RBD) - ansible.builtin.assert: - that: "'kind: StorageClass' in content" - fail_msg: "kind: StorageClass не найден" + - name: Set resources content fact + ansible.builtin.set_fact: + content: "{{ res_raw.content | b64decode }}" - - name: Assert CephFilesystem kind is present + - name: Assert RBD and CephFS storage classes are present ansible.builtin.assert: - that: "'kind: CephFilesystem' in content" - fail_msg: "kind: CephFilesystem не найден" - - - name: Assert mon count is 3 - ansible.builtin.assert: - that: "'count: 3' in content" - fail_msg: "mon count: 3 не найден" - - - name: Assert namespace is rook-ceph - ansible.builtin.assert: - that: "'namespace: rook-ceph' in content" - fail_msg: "namespace: rook-ceph не найден" - - - name: Assert ceph image is set - ansible.builtin.assert: - that: "'quay.io/ceph/ceph' in content" - fail_msg: "ceph image не найден" + that: + - "'name: csi-rbd-secret' in content" + - "'name: csi-cephfs-secret' in content" + - "'provisioner: rbd.csi.ceph.com' in content" + - "'provisioner: cephfs.csi.ceph.com' in content" + - "'name: ceph-rbd' in content" + - "'name: cephfs' in content" + fail_msg: "StorageClass/Secret для ceph-csi не отрендерены" diff --git a/addons/csi-ceph/role/tasks/main.yml b/addons/csi-ceph/role/tasks/main.yml index 8e1cbe2..64412bb 100644 --- a/addons/csi-ceph/role/tasks/main.yml +++ b/addons/csi-ceph/role/tasks/main.yml @@ -1,98 +1,64 @@ --- -- name: Add Rook Helm repo - kubernetes.core.helm_repository: - name: rook-release - repo_url: "{{ rook_ceph_chart_repo }}" - environment: - KUBECONFIG: "{{ k3s_kubeconfig_path }}" +- name: Validate required Ceph CSI variables + ansible.builtin.assert: + that: + - csi_ceph_cluster_id | length > 0 + 
- csi_ceph_monitors | length > 0 + - csi_ceph_user_key | length > 0 + fail_msg: >- + Для csi-ceph нужно задать csi_ceph_cluster_id, csi_ceph_monitors и csi_ceph_user_key + (обычно в group_vars/all/addons.yml и vault.yml). -- name: Install Rook-Ceph operator via Helm - kubernetes.core.helm: - name: rook-ceph - chart_ref: rook-release/rook-ceph - chart_version: "{{ rook_ceph_version }}" - release_namespace: "{{ rook_ceph_namespace }}" - create_namespace: true - wait: true - timeout: "10m0s" - values: - tolerations: - - key: "node-role.kubernetes.io/control-plane" - operator: "Exists" - effect: "NoSchedule" - discover: - tolerations: - - operator: "Exists" - csi: - provisionerTolerations: - - key: "node-role.kubernetes.io/control-plane" - operator: "Exists" - effect: "NoSchedule" - pluginTolerations: - - operator: "Exists" - monitoring: - enabled: "{{ rook_ceph_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" - environment: - KUBECONFIG: "{{ k3s_kubeconfig_path }}" - -- name: Wait for Rook operator to be ready - ansible.builtin.command: > - k3s kubectl -n {{ rook_ceph_namespace }} - rollout status deployment/rook-ceph-operator --timeout=180s - changed_when: false - retries: 3 - delay: 10 - -- name: Template CephCluster + StorageClasses manifest +- name: Render ceph-csi ConfigMap manifests ansible.builtin.template: - src: ceph-cluster.yaml.j2 - dest: /tmp/ceph-cluster.yaml - mode: '0644' + src: csi-config-map.yaml.j2 + dest: /tmp/csi-ceph-config-map.yaml + mode: "0644" -- name: Apply CephCluster + StorageClasses - ansible.builtin.command: k3s kubectl apply -f /tmp/ceph-cluster.yaml +- name: Apply ceph-csi ConfigMap manifests + ansible.builtin.command: k3s kubectl apply -f /tmp/csi-ceph-config-map.yaml changed_when: true -- name: Wait for Ceph monitors to be ready (может занять несколько минут) +- name: Install ceph-csi RBD provisioner plugin ansible.builtin.command: > - k3s kubectl -n {{ rook_ceph_namespace }} - wait cephcluster/rook-ceph - 
--for=jsonpath='{.status.phase}'=Ready - --timeout=600s - changed_when: false - retries: 5 - delay: 30 - failed_when: false - -- name: Create Ceph Dashboard Ingress - ansible.builtin.template: - src: ceph-dashboard-ingress.yaml.j2 - dest: /tmp/ceph-dashboard-ingress.yaml - mode: '0644' - when: rook_ceph_dashboard_ingress_enabled | bool - -- name: Apply Ceph Dashboard Ingress - ansible.builtin.command: k3s kubectl apply -f /tmp/ceph-dashboard-ingress.yaml + k3s kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/{{ csi_ceph_driver_ref }}/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml changed_when: true - when: rook_ceph_dashboard_ingress_enabled | bool + when: csi_ceph_enable_rbd | bool -- name: Get Ceph Dashboard admin password +- name: Install ceph-csi RBD node plugin ansible.builtin.command: > - k3s kubectl -n {{ rook_ceph_namespace }} - get secret rook-ceph-dashboard-password - -o jsonpath='{.data.password}' - register: _ceph_dashboard_password - changed_when: false - failed_when: false + k3s kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/{{ csi_ceph_driver_ref }}/deploy/rbd/kubernetes/csi-rbdplugin.yaml + changed_when: true + when: csi_ceph_enable_rbd | bool -- name: Show Rook-Ceph access info +- name: Install ceph-csi CephFS provisioner plugin + ansible.builtin.command: > + k3s kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/{{ csi_ceph_driver_ref }}/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml + changed_when: true + when: csi_ceph_enable_cephfs | bool + +- name: Install ceph-csi CephFS node plugin + ansible.builtin.command: > + k3s kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/{{ csi_ceph_driver_ref }}/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml + changed_when: true + when: csi_ceph_enable_cephfs | bool + +- name: Render ceph-csi Secrets and StorageClasses + ansible.builtin.template: + src: csi-ceph-secrets-and-sc.yaml.j2 + dest: /tmp/csi-ceph-resources.yaml + mode: 
"0644" + +- name: Apply ceph-csi Secrets and StorageClasses + ansible.builtin.command: k3s kubectl apply -f /tmp/csi-ceph-resources.yaml + changed_when: true + +- name: Show ceph-csi access info ansible.builtin.debug: msg: - - "Rook-Ceph установлен в namespace: {{ rook_ceph_namespace }}" - - "StorageClass (block RWO): {{ rook_ceph_block_storage_class }}" - - "StorageClass (filesystem RWX): {{ rook_ceph_filesystem_storage_class }}" - - "Replicas: {{ rook_ceph_block_replica_count }} (для single-node задай rook_ceph_block_replica_count=1)" - - "{% if rook_ceph_dashboard_ingress_enabled %}Dashboard: http{{ 's' if rook_ceph_dashboard_ingress_tls else '' }}://{{ rook_ceph_dashboard_ingress_host }}{% else %}Dashboard: kubectl port-forward svc/rook-ceph-mgr-dashboard -n {{ rook_ceph_namespace }} 7000:7000{% endif %}" - - "Dashboard логин: admin / {{ _ceph_dashboard_password.stdout | b64decode if _ceph_dashboard_password.rc == 0 else '(пока создаётся)' }}" - - "Статус кластера: kubectl -n {{ rook_ceph_namespace }} get cephcluster" - - "Toolbox: kubectl -n {{ rook_ceph_namespace }} exec -it deploy/rook-ceph-tools -- bash" + - "Ceph CSI установлен (ref={{ csi_ceph_driver_ref }})" + - "ClusterID(FSID): {{ csi_ceph_cluster_id }}" + - "RBD StorageClass: {{ csi_ceph_rbd_storage_class_name if csi_ceph_enable_rbd else 'disabled' }}" + - "CephFS StorageClass: {{ csi_ceph_cephfs_storage_class_name if csi_ceph_enable_cephfs else 'disabled' }}" + - "Пример PVC RBD: storageClassName={{ csi_ceph_rbd_storage_class_name }} accessModes=[ReadWriteOnce]" + - "Пример PVC CephFS: storageClassName={{ csi_ceph_cephfs_storage_class_name }} accessModes=[ReadWriteMany]" diff --git a/addons/csi-ceph/role/templates/csi-ceph-secrets-and-sc.yaml.j2 b/addons/csi-ceph/role/templates/csi-ceph-secrets-and-sc.yaml.j2 new file mode 100644 index 0000000..2d17988 --- /dev/null +++ b/addons/csi-ceph/role/templates/csi-ceph-secrets-and-sc.yaml.j2 @@ -0,0 +1,68 @@ +{% if csi_ceph_enable_rbd %} +apiVersion: v1 
+kind: Secret +metadata: + name: csi-rbd-secret + namespace: {{ csi_ceph_namespace }} +stringData: + userID: "{{ csi_ceph_user_id }}" + userKey: "{{ csi_ceph_user_key }}" +type: Opaque +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ csi_ceph_rbd_storage_class_name }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ csi_ceph_rbd_storage_class_default | lower }}" +provisioner: rbd.csi.ceph.com +parameters: + clusterID: "{{ csi_ceph_cluster_id }}" + pool: "{{ csi_ceph_rbd_pool }}" + imageFeatures: layering + csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret + csi.storage.k8s.io/provisioner-secret-namespace: {{ csi_ceph_namespace }} + csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret + csi.storage.k8s.io/controller-expand-secret-namespace: {{ csi_ceph_namespace }} + csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret + csi.storage.k8s.io/node-stage-secret-namespace: {{ csi_ceph_namespace }} + csi.storage.k8s.io/fstype: {{ csi_ceph_rbd_fs_type }} +reclaimPolicy: {{ csi_ceph_reclaim_policy }} +allowVolumeExpansion: true +volumeBindingMode: {{ csi_ceph_volume_binding_mode }} +{% endif %} +{% if csi_ceph_enable_rbd and csi_ceph_enable_cephfs %} +--- +{% endif %} +{% if csi_ceph_enable_cephfs %} +apiVersion: v1 +kind: Secret +metadata: + name: csi-cephfs-secret + namespace: {{ csi_ceph_namespace }} +stringData: + adminID: "{{ csi_ceph_user_id }}" + adminKey: "{{ csi_ceph_user_key }}" +type: Opaque +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ csi_ceph_cephfs_storage_class_name }} + annotations: + storageclass.kubernetes.io/is-default-class: "{{ csi_ceph_cephfs_storage_class_default | lower }}" +provisioner: cephfs.csi.ceph.com +parameters: + clusterID: "{{ csi_ceph_cluster_id }}" + fsName: "{{ csi_ceph_cephfs_fs_name }}" + pool: "{{ csi_ceph_cephfs_pool }}" + csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret + 
csi.storage.k8s.io/provisioner-secret-namespace: {{ csi_ceph_namespace }} + csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret + csi.storage.k8s.io/controller-expand-secret-namespace: {{ csi_ceph_namespace }} + csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret + csi.storage.k8s.io/node-stage-secret-namespace: {{ csi_ceph_namespace }} +reclaimPolicy: {{ csi_ceph_reclaim_policy }} +allowVolumeExpansion: true +volumeBindingMode: {{ csi_ceph_volume_binding_mode }} +{% endif %} diff --git a/addons/csi-ceph/role/templates/csi-config-map.yaml.j2 b/addons/csi-ceph/role/templates/csi-config-map.yaml.j2 new file mode 100644 index 0000000..30bb00c --- /dev/null +++ b/addons/csi-ceph/role/templates/csi-config-map.yaml.j2 @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ceph-csi-config + namespace: {{ csi_ceph_namespace }} +data: + config.json: |- + [ + { + "clusterID": "{{ csi_ceph_cluster_id }}", + "monitors": [{% for m in csi_ceph_monitors %}"{{ m }}"{% if not loop.last %}, {% endif %}{% endfor %}] + } + ] +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ceph-csi-encryption-kms-config + namespace: {{ csi_ceph_namespace }} +data: + config.json: |- + {} diff --git a/addons/csi-glusterfs/README.md b/addons/csi-glusterfs/README.md index 28646d0..33c22f5 100644 --- a/addons/csi-glusterfs/README.md +++ b/addons/csi-glusterfs/README.md @@ -76,3 +76,8 @@ heketi-cli --server http://192.168.1.20:8080 \ --user admin --secret "password" \ cluster list ``` +## Официальные ресурсы + +- Официальный сайт: [https://www.gluster.org/](https://www.gluster.org/) +- Официальная документация: [https://github.com/gluster/gluster-csi-driver](https://github.com/gluster/gluster-csi-driver) +- Версии Helm chart / ПО: [https://github.com/gluster/gluster-csi-driver/releases](https://github.com/gluster/gluster-csi-driver/releases) diff --git a/addons/csi-nfs/README.md b/addons/csi-nfs/README.md index 107203e..05e8f98 100644 --- 
a/addons/csi-nfs/README.md +++ b/addons/csi-nfs/README.md @@ -70,3 +70,8 @@ kubectl get storageclass kubectl get pvc -A ``` +## Официальные ресурсы + +- Официальный сайт: [https://kubernetes-csi.github.io/docs/drivers.html](https://kubernetes-csi.github.io/docs/drivers.html) +- Официальная документация: [https://github.com/kubernetes-csi/csi-driver-nfs](https://github.com/kubernetes-csi/csi-driver-nfs) +- Версии Helm chart / ПО: [https://github.com/kubernetes-csi/csi-driver-nfs/releases](https://github.com/kubernetes-csi/csi-driver-nfs/releases) diff --git a/addons/csi-s3/README.md b/addons/csi-s3/README.md index a4a3be1..956b896 100644 --- a/addons/csi-s3/README.md +++ b/addons/csi-s3/README.md @@ -78,3 +78,8 @@ spec: | `geesefs` | Рекомендуется, высокая производительность, поддержка POSIX | | `s3fs` | Классический FUSE S3, совместимость | | `rclone` | Поддержка многих провайдеров (GCS, Azure Blob и др.) | +## Официальные ресурсы + +- Официальный сайт: [https://github.com/ctrox/csi-s3](https://github.com/ctrox/csi-s3) +- Официальная документация: [https://github.com/ctrox/csi-s3](https://github.com/ctrox/csi-s3) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/csi-s3/csi-s3](https://artifacthub.io/packages/helm/csi-s3/csi-s3) diff --git a/addons/databasus/README.md b/addons/databasus/README.md index 6ed7466..985ff10 100644 --- a/addons/databasus/README.md +++ b/addons/databasus/README.md @@ -48,3 +48,8 @@ databasus_mysql_host: "my-mysql.db.svc.cluster.local" databasus_mysql_port: 3306 databasus_minio_host: "minio.minio.svc.cluster.local:9000" ``` +## Официальные ресурсы + +- Официальный сайт: [https://github.com/databack-io/databag](https://github.com/databack-io/databag) +- Официальная документация: [https://github.com/databack-io/databag](https://github.com/databack-io/databag) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/search?ts_query_web=databasus](https://artifacthub.io/packages/search?ts_query_web=databasus) diff --git 
a/addons/external-secrets/README.md b/addons/external-secrets/README.md index 5ca9586..55bf1b8 100644 --- a/addons/external-secrets/README.md +++ b/addons/external-secrets/README.md @@ -286,3 +286,8 @@ kubectl get externalsecrets --all-namespaces # Подробности (условие Ready/SecretSynced) kubectl describe externalsecret myapp-db-secret -n myapp ``` +## Официальные ресурсы + +- Официальный сайт: [https://external-secrets.io/](https://external-secrets.io/) +- Официальная документация: [https://external-secrets.io/latest/](https://external-secrets.io/latest/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/external-secrets-operator/external-secrets](https://artifacthub.io/packages/helm/external-secrets-operator/external-secrets) diff --git a/addons/gitea/README.md b/addons/gitea/README.md index df677e1..7e72263 100644 --- a/addons/gitea/README.md +++ b/addons/gitea/README.md @@ -46,7 +46,10 @@ git clone ssh://git@192.168.1.10:30022/user/repo.git ## Интеграция с PostgreSQL -При `addon_postgresql: true` Gitea автоматически использует общий PostgreSQL, создавая свою базу данных `gitea` и пользователя. +Выбор задаётся переменной `gitea_database_mode`: +- `auto` — внешняя PostgreSQL при `addon_postgresql: true`, иначе встроенная chart PostgreSQL; +- `internal` — всегда встроенная chart PostgreSQL; +- `external_postgresql` — всегда внешняя PostgreSQL (аддон `postgresql` или совместимый внешний сервис). 
## Gitea Actions (GitHub Actions совместимая CI/CD) @@ -106,3 +109,8 @@ jobs: kubectl logs -n gitea deployment/gitea -f kubectl exec -n gitea deployment/gitea -- gitea admin user list ``` +## Официальные ресурсы + +- Официальный сайт: [https://about.gitea.com/](https://about.gitea.com/) +- Официальная документация: [https://docs.gitea.com/](https://docs.gitea.com/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/gitea-charts/gitea](https://artifacthub.io/packages/helm/gitea-charts/gitea) diff --git a/addons/gitea/role/defaults/main.yml b/addons/gitea/role/defaults/main.yml index cce9ff5..b965b9c 100644 --- a/addons/gitea/role/defaults/main.yml +++ b/addons/gitea/role/defaults/main.yml @@ -41,8 +41,11 @@ gitea_storage_size: "10Gi" gitea_storage_class: "" # База данных -# При addon_postgresql: true — создаётся отдельный user/db в shared PostgreSQL -# При addon_postgresql: false — используется встроенная PostgreSQL чарта +# Режим БД Gitea: +# auto — внешняя PostgreSQL при addon_postgresql=true, иначе встроенная chart PostgreSQL +# internal — всегда встроенная chart PostgreSQL +# external_postgresql — всегда внешняя PostgreSQL +gitea_database_mode: "auto" # auto | internal | external_postgresql # Имя базы данных gitea_db_name: "gitea" # Пользователь БД diff --git a/addons/gitea/role/tasks/main.yml b/addons/gitea/role/tasks/main.yml index 4c6ac7f..747d5a6 100644 --- a/addons/gitea/role/tasks/main.yml +++ b/addons/gitea/role/tasks/main.yml @@ -24,6 +24,15 @@ ansible.builtin.debug: msg: "Устанавливаю Gitea chart {{ _gitea_chart_version }}" +- name: Resolve Gitea database mode + ansible.builtin.set_fact: + _gitea_use_external_postgresql: >- + {{ + (gitea_database_mode == 'external_postgresql') + or + (gitea_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) + }} + - name: Create dedicated PostgreSQL user and database for Gitea kubernetes.core.k8s: state: present @@ -71,7 +80,7 @@ value: "{{ gitea_db_name }}" environment: KUBECONFIG: 
"{{ k3s_kubeconfig_path }}" - when: addon_postgresql | default(false) | bool + when: _gitea_use_external_postgresql | bool - name: Wait for Gitea PostgreSQL provision Job to complete ansible.builtin.command: > @@ -79,7 +88,7 @@ wait job/gitea-pg-provision --for=condition=complete --timeout=120s changed_when: false - when: addon_postgresql | default(false) | bool + when: _gitea_use_external_postgresql | bool - name: Template Gitea values ansible.builtin.template: @@ -187,7 +196,7 @@ - "URL: http{{ 's' if gitea_ingress_tls else '' }}://{{ gitea_ingress_host }}" - "Логин: {{ gitea_admin_username }}" - "Пароль: {{ gitea_admin_password }}" - - "БД: {{ 'PostgreSQL ' + postgresql_external_host if addon_postgresql | default(false) | bool else 'встроенная PostgreSQL' }}" + - "БД: {{ 'PostgreSQL ' + postgresql_external_host if _gitea_use_external_postgresql | bool else 'встроенная PostgreSQL' }}" - "{% if gitea_ssh_enabled %}SSH клон: git clone ssh://git@{{ gitea_ingress_host }}:{{ gitea_ssh_node_port }}/user/repo.git{% else %}SSH отключён — клонирование только по HTTP{% endif %}" - "Gitea Actions: {{ 'включены' if gitea_actions_enabled else 'отключены' }}" - "{% if gitea_actions_runner_enabled %}act_runner: {{ gitea_actions_runner_replicas }} реплик (DinD: {{ gitea_actions_runner_dind_enabled }}){% else %}act_runner: не установлен{% endif %}" diff --git a/addons/gitea/role/templates/gitea-values.yaml.j2 b/addons/gitea/role/templates/gitea-values.yaml.j2 index 99d33d3..f15e593 100644 --- a/addons/gitea/role/templates/gitea-values.yaml.j2 +++ b/addons/gitea/role/templates/gitea-values.yaml.j2 @@ -15,7 +15,7 @@ gitea: SSH_PORT: "{{ gitea_ssh_node_port if gitea_ssh_enabled else 22 }}" DISABLE_SSH: "{{ 'false' if gitea_ssh_enabled else 'true' }}" -{% if addon_postgresql | default(false) | bool %} +{% if (gitea_database_mode == 'external_postgresql') or (gitea_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) %} database: DB_TYPE: postgres HOST: "{{ 
postgresql_external_host }}:{{ postgresql_external_port }}" @@ -44,9 +44,9 @@ persistence: storageClass: "{{ gitea_storage_class }}" {% endif %} -# Встроенный PostgreSQL чарта — включаем только если addon_postgresql: false +# Встроенный PostgreSQL чарта — включаем, если не выбран внешний PostgreSQL postgresql: - enabled: {{ (not (addon_postgresql | default(false) | bool)) | lower }} + enabled: {{ (not ((gitea_database_mode == 'external_postgresql') or (gitea_database_mode == 'auto' and (addon_postgresql | default(false) | bool)))) | lower }} primary: persistence: size: "2Gi" diff --git a/addons/gitlab/README.md b/addons/gitlab/README.md new file mode 100644 index 0000000..597cedb --- /dev/null +++ b/addons/gitlab/README.md @@ -0,0 +1,47 @@ +# gitlab + +Аддон устанавливает GitLab в Kubernetes, включает GitLab Runner в pod-режиме, публикует веб-интерфейс через Ingress и использует PVC для хранения. + +## Что умеет + +- ставит GitLab Helm chart в namespace `gitlab`; +- включает встроенный `gitlab-runner` (runner-поды внутри кластера); +- создает PVC для хранения данных GitLab; +- поднимает Ingress для доступа к веб-интерфейсу; +- при `addon_postgresql: true` подключает GitLab к внешней PostgreSQL из аддона `postgresql`. + +## Быстрый старт + +```yaml +# group_vars/all/addons.yml +addon_gitlab: true +gitlab_ingress_host: "gitlab.home.local" +gitlab_domain: "home.local" +gitlab_runner_install: true +gitlab_runner_replicas: 2 +``` + +```yaml +# group_vars/all/vault.yml +vault_gitlab_admin_password: "очень-сложный-пароль" +vault_gitlab_db_password: "пароль-для-gitlab-db" +``` + +Установка: + +```bash +make addon-gitlab +``` + +## PostgreSQL: встроенная или внешняя + +- Выбор задаётся переменной `gitlab_database_mode`: + - `auto` — внешняя PostgreSQL при `addon_postgresql: true`, иначе встроенная chart PostgreSQL; + - `internal` — всегда встроенная chart PostgreSQL; + - `external_postgresql` — всегда внешняя PostgreSQL (из addon `postgresql` или совместимого сервиса). 
+ +## Официальные ресурсы + +- Официальный сайт: [https://about.gitlab.com/](https://about.gitlab.com/) +- Официальная документация: [https://docs.gitlab.com/charts/](https://docs.gitlab.com/charts/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/gitlab/gitlab](https://artifacthub.io/packages/helm/gitlab/gitlab) diff --git a/addons/gitlab/playbook.yml b/addons/gitlab/playbook.yml new file mode 100644 index 0000000..ba3be79 --- /dev/null +++ b/addons/gitlab/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install GitLab + hosts: k3s_master[0] + gather_facts: false + become: true + roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/gitlab/role/defaults/main.yml b/addons/gitlab/role/defaults/main.yml new file mode 100644 index 0000000..cbbe2dc --- /dev/null +++ b/addons/gitlab/role/defaults/main.yml @@ -0,0 +1,47 @@ +--- +# Версия Helm-чарта GitLab; пусто — последняя доступная +gitlab_version: "" +# Namespace установки GitLab +gitlab_namespace: "gitlab" +# Helm-репозиторий GitLab +gitlab_chart_repo: "https://charts.gitlab.io/" + +# Включить Ingress для GitLab +gitlab_ingress_enabled: true +# Основной хост веб-интерфейса GitLab +gitlab_ingress_host: "gitlab.example.com" +# IngressClass +gitlab_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}" +# TLS на ingress +gitlab_ingress_tls: false +# Базовый домен для внутренних сервисов GitLab chart +gitlab_domain: "example.com" + +# Пароль root-пользователя GitLab +gitlab_admin_password: "{{ vault_gitlab_admin_password | default('changeme-gitlab') }}" + +# StorageClass для PVC GitLab +gitlab_storage_class: "" +# Размер PVC Gitaly (репозитории) +gitlab_gitaly_storage_size: "20Gi" + +# Устанавливать встроенный gitlab-runner +gitlab_runner_install: true +# Число pod-реплик runner +gitlab_runner_replicas: 2 +# Параллелизм заданий runner +gitlab_runner_concurrent: 10 + +# Параметры внешней PostgreSQL (используются только если addon_postgresql=true) +# Режим БД GitLab: +# auto — внешняя 
PostgreSQL при addon_postgresql=true, иначе встроенная chart PostgreSQL +# internal — всегда встроенная chart PostgreSQL +# external_postgresql — всегда внешняя PostgreSQL +gitlab_database_mode: "auto" # auto | internal | external_postgresql +# Параметры внешней PostgreSQL (используются при gitlab_database_mode=external_postgresql +# или auto+addon_postgresql=true) +gitlab_db_name: "gitlabhq_production" +# Пользователь БД GitLab +gitlab_db_username: "gitlab" +# Пароль пользователя БД GitLab +gitlab_db_password: "{{ vault_gitlab_db_password | default('changeme-gitlab-db') }}" diff --git a/addons/gitlab/role/molecule/default/converge.yml b/addons/gitlab/role/molecule/default/converge.yml new file mode 100644 index 0000000..8ec7645 --- /dev/null +++ b/addons/gitlab/role/molecule/default/converge.yml @@ -0,0 +1,20 @@ +--- +- name: Converge — gitlab defaults validation + hosts: all + gather_facts: false + become: false + vars: + gitlab_namespace: "gitlab" + gitlab_ingress_host: "gitlab.home.local" + gitlab_runner_install: true + gitlab_runner_replicas: 2 + tasks: + - name: Write variables to fact file for verification + ansible.builtin.copy: + dest: /tmp/gitlab-facts.yaml + mode: "0644" + content: | + gitlab_namespace: "{{ gitlab_namespace }}" + gitlab_ingress_host: "{{ gitlab_ingress_host }}" + gitlab_runner_install: "{{ gitlab_runner_install }}" + gitlab_runner_replicas: "{{ gitlab_runner_replicas }}" diff --git a/addons/gitlab/role/molecule/default/molecule.yml b/addons/gitlab/role/molecule/default/molecule.yml new file mode 100644 index 0000000..39283c5 --- /dev/null +++ b/addons/gitlab/role/molecule/default/molecule.yml @@ -0,0 +1,27 @@ +--- +driver: + name: docker + +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true + groups: + - k3s_master + +provisioner: + name: ansible + playbooks: + converge: converge.yml + verify: verify.yml + config_options: + defaults: + interpreter_python: auto_silent + +verifier: + 
name: ansible + +lint: | + set -e + yamllint . + ansible-lint diff --git a/addons/gitlab/role/molecule/default/verify.yml b/addons/gitlab/role/molecule/default/verify.yml new file mode 100644 index 0000000..2485789 --- /dev/null +++ b/addons/gitlab/role/molecule/default/verify.yml @@ -0,0 +1,26 @@ +--- +- name: Verify — gitlab defaults + hosts: all + gather_facts: false + become: false + tasks: + - name: Read facts file + ansible.builtin.slurp: + src: /tmp/gitlab-facts.yaml + register: facts_raw + + - name: Parse facts + ansible.builtin.set_fact: + v: "{{ facts_raw.content | b64decode | from_yaml }}" + + - name: Assert gitlab_namespace + ansible.builtin.assert: + that: v.gitlab_namespace == "gitlab" + + - name: Assert gitlab_runner_install + ansible.builtin.assert: + that: (v.gitlab_runner_install | string | lower) == "true" + + - name: Assert gitlab_runner_replicas + ansible.builtin.assert: + that: (v.gitlab_runner_replicas | int) == 2 diff --git a/addons/gitlab/role/tasks/main.yml b/addons/gitlab/role/tasks/main.yml new file mode 100644 index 0000000..65e045d --- /dev/null +++ b/addons/gitlab/role/tasks/main.yml @@ -0,0 +1,143 @@ +--- +- name: Add GitLab Helm repo + kubernetes.core.helm_repository: + name: gitlab + repo_url: "{{ gitlab_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Fetch latest GitLab chart version + ansible.builtin.command: helm search repo gitlab/gitlab --output json + register: _gitlab_chart_search + changed_when: false + when: gitlab_version == "" + +- name: Set effective GitLab chart version + ansible.builtin.set_fact: + _gitlab_chart_version: >- + {{ gitlab_version if gitlab_version != '' else (_gitlab_chart_search.stdout | from_json)[0].version }} + +- name: Resolve GitLab database mode + ansible.builtin.set_fact: + _gitlab_use_external_postgresql: >- + {{ + (gitlab_database_mode == 'external_postgresql') + or + (gitlab_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) + }} + +- 
name: Create secret for initial GitLab root password + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + metadata: + name: gitlab-initial-root-password + namespace: "{{ gitlab_namespace }}" + type: Opaque + stringData: + password: "{{ gitlab_admin_password }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Create dedicated PostgreSQL user and database for GitLab + kubernetes.core.k8s: + state: present + definition: + apiVersion: batch/v1 + kind: Job + metadata: + name: gitlab-pg-provision + namespace: "{{ postgresql_namespace | default('postgresql') }}" + spec: + ttlSecondsAfterFinished: 300 + template: + spec: + restartPolicy: OnFailure + containers: + - name: psql + image: postgres:16-alpine + command: + - /bin/sh + - -c + - | + PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c " + DO \$\$ + BEGIN + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '${DB_USER}') THEN + CREATE USER ${DB_USER} WITH PASSWORD '${DB_PASS}'; + END IF; + END \$\$; + " && + PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -tc \ + "SELECT 1 FROM pg_database WHERE datname = '${DB_NAME}'" \ + | grep -q 1 || \ + PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c \ + "CREATE DATABASE ${DB_NAME} OWNER ${DB_USER};" + env: + - name: HOST + value: "{{ postgresql_external_host }}" + - name: ADMIN_PASS + value: "{{ vault_postgresql_postgres_password | default('changeme-postgres') }}" + - name: DB_USER + value: "{{ gitlab_db_username }}" + - name: DB_PASS + value: "{{ gitlab_db_password }}" + - name: DB_NAME + value: "{{ gitlab_db_name }}" + when: _gitlab_use_external_postgresql | bool + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Wait for GitLab PostgreSQL provision Job to complete + ansible.builtin.command: > + k3s kubectl -n {{ postgresql_namespace | default('postgresql') }} + wait job/gitlab-pg-provision --for=condition=complete --timeout=120s + changed_when: false + when: 
_gitlab_use_external_postgresql | bool
+
+- name: Create secret for external PostgreSQL password
+  kubernetes.core.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Secret
+      metadata:
+        name: gitlab-external-db
+        namespace: "{{ gitlab_namespace }}"
+      type: Opaque
+      stringData:
+        password: "{{ gitlab_db_password }}"
+  when: _gitlab_use_external_postgresql | bool
+  environment:
+    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
+
+- name: Template GitLab values
+  ansible.builtin.template:
+    src: gitlab-values.yaml.j2
+    dest: /tmp/gitlab-values.yaml
+    mode: "0644"
+
+- name: Install GitLab via Helm
+  kubernetes.core.helm:
+    name: gitlab
+    chart_ref: gitlab/gitlab
+    chart_version: "{{ _gitlab_chart_version }}"
+    release_namespace: "{{ gitlab_namespace }}"
+    create_namespace: true
+    wait: true
+    timeout: "20m0s"
+    values_files:
+      - /tmp/gitlab-values.yaml
+  environment:
+    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
+
+- name: Show GitLab access info
+  ansible.builtin.debug:
+    msg:
+      - "GitLab установлен в namespace: {{ gitlab_namespace }}"
+      - "URL: http{{ 's' if gitlab_ingress_tls else '' }}://{{ gitlab_ingress_host }}"
+      - "root пароль: {{ gitlab_admin_password }}"
+      - "Runner: {{ 'включен' if gitlab_runner_install else 'выключен' }}"
+      - "БД: {{ 'внешняя PostgreSQL' if _gitlab_use_external_postgresql | bool else 'встроенная PostgreSQL chart' }}"
diff --git a/addons/gitlab/role/templates/gitlab-values.yaml.j2 b/addons/gitlab/role/templates/gitlab-values.yaml.j2
new file mode 100644
index 0000000..f16a379
--- /dev/null
+++ b/addons/gitlab/role/templates/gitlab-values.yaml.j2
@@ -0,0 +1,55 @@
+global:
+  edition: ce
+  hosts:
+    domain: "{{ gitlab_domain }}"
+    gitlab:
+      name: "{{ gitlab_ingress_host }}"
+  ingress:
+    enabled: {{ gitlab_ingress_enabled | bool | lower }}
+    class: "{{ gitlab_ingress_class }}"
+    configureCertmanager: false
+    tls:
+      enabled: {{ gitlab_ingress_tls | bool | lower }}
+  appConfig:
+    initialRootPassword:
+      secret: 
gitlab-initial-root-password + key: password +{% if (gitlab_database_mode == 'external_postgresql') or (gitlab_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) %} + psql: + host: "{{ postgresql_external_host }}" + port: {{ postgresql_external_port | default(5432) }} + username: "{{ gitlab_db_username }}" + database: "{{ gitlab_db_name }}" + password: + secret: gitlab-external-db + key: password +{% endif %} + +certmanager: + install: false + +nginx-ingress: + enabled: false + +gitlab: + webservice: + ingress: + enabled: {{ gitlab_ingress_enabled | bool | lower }} + gitaly: + persistence: + size: "{{ gitlab_gitaly_storage_size }}" +{% if gitlab_storage_class | length > 0 %} + storageClass: "{{ gitlab_storage_class }}" +{% endif %} + +gitlab-runner: + install: {{ gitlab_runner_install | bool | lower }} + replicas: {{ gitlab_runner_replicas }} + concurrent: {{ gitlab_runner_concurrent }} + rbac: + create: true + runners: + privileged: true + +postgresql: + install: {{ (not ((gitlab_database_mode == 'external_postgresql') or (gitlab_database_mode == 'auto' and (addon_postgresql | default(false) | bool)))) | lower }} diff --git a/addons/harbor/README.md b/addons/harbor/README.md index 7b71978..1a7f7f4 100644 --- a/addons/harbor/README.md +++ b/addons/harbor/README.md @@ -28,12 +28,15 @@ make addon-harbor | `harbor_ingress_host` | `harbor.local` | Hostname (обязательно) | | `harbor_ingress_tls` | `false` | TLS через cert-manager | | `harbor_registry_storage_size` | `20Gi` | PVC для образов | -| `harbor_database_type` | авто | internal \| external | +| `harbor_database_mode` | `auto` | `auto` \| `internal` \| `external_postgresql` | | `harbor_proxy_cache_enabled` | `true` | Proxy cache для публичных registry | ## Интеграция с PostgreSQL -При `addon_postgresql: true` Harbor автоматически использует общий PostgreSQL. 
+Выбор задаётся переменной `harbor_database_mode`: +- `auto` — внешняя PostgreSQL при `addon_postgresql: true`, иначе встроенная PostgreSQL чарта Harbor; +- `internal` — всегда встроенная PostgreSQL; +- `external_postgresql` — всегда внешняя PostgreSQL. ## Proxy Cache @@ -99,3 +102,8 @@ spec: kubectl get pods -n harbor kubectl logs -n harbor deployment/harbor-core -f ``` +## Официальные ресурсы + +- Официальный сайт: [https://goharbor.io/](https://goharbor.io/) +- Официальная документация: [https://goharbor.io/docs/](https://goharbor.io/docs/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/harbor/harbor](https://artifacthub.io/packages/helm/harbor/harbor) diff --git a/addons/harbor/role/defaults/main.yml b/addons/harbor/role/defaults/main.yml index 797291a..909117b 100644 --- a/addons/harbor/role/defaults/main.yml +++ b/addons/harbor/role/defaults/main.yml @@ -33,9 +33,21 @@ harbor_storage_class: "" # "" = default StorageClass # База данных # internal — встроенная PostgreSQL (1Gi PVC) -# external — addon_postgresql (автоматически при addon_postgresql: true) +# external — внешняя PostgreSQL (например из addon_postgresql) +# Режим БД Harbor: +# auto — external при addon_postgresql=true, иначе internal +# internal — всегда встроенная PostgreSQL чарта Harbor +# external_postgresql — всегда внешняя PostgreSQL +harbor_database_mode: "auto" # auto | internal | external_postgresql # Тип БД: встроенная PostgreSQL в чарте или внешняя -harbor_database_type: "{{ 'external' if addon_postgresql | default(false) | bool else 'internal' }}" +harbor_database_type: >- + {{ + 'external' + if (harbor_database_mode == 'external_postgresql' + or (harbor_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) + ) + else 'internal' + }} # Размер PVC PostgreSQL при internal harbor_database_storage_size: "1Gi" # используется только при type: internal # Хост внешней PostgreSQL diff --git a/addons/hysteria2-server/README.md b/addons/hysteria2-server/README.md 
index a497d33..7f9519f 100644 --- a/addons/hysteria2-server/README.md +++ b/addons/hysteria2-server/README.md @@ -264,3 +264,8 @@ make addon-hysteria2-server ```yaml hysteria2_server_version: "app/v2.5.1" ``` +## Официальные ресурсы + +- Официальный сайт: [https://hysteria.network/](https://hysteria.network/) +- Официальная документация: [https://v2.hysteria.network/docs/getting-started/Installation/](https://v2.hysteria.network/docs/getting-started/Installation/) +- Версии Helm chart / ПО: [https://github.com/apernet/hysteria/releases](https://github.com/apernet/hysteria/releases) diff --git a/addons/ingress-add-domains/README.md b/addons/ingress-add-domains/README.md index 80108f7..f5b8b32 100644 --- a/addons/ingress-add-domains/README.md +++ b/addons/ingress-add-domains/README.md @@ -212,3 +212,8 @@ kubectl -n ingress-nginx logs -l app.kubernetes.io/name=ingress-nginx --tail=50 # Проверить auth Secret kubectl -n get secret -auth ``` +## Официальные ресурсы + +- Официальный сайт: [https://kubernetes.io/docs/concepts/services-networking/ingress/](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- Официальная документация: [https://kubernetes.io/docs/concepts/services-networking/ingress/](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- Версии Helm chart / ПО: [https://kubernetes.github.io/ingress-nginx/deploy/](https://kubernetes.github.io/ingress-nginx/deploy/) diff --git a/addons/ingress-nginx/README.md b/addons/ingress-nginx/README.md index ccde562..66bbb2c 100644 --- a/addons/ingress-nginx/README.md +++ b/addons/ingress-nginx/README.md @@ -124,3 +124,8 @@ ingress_nginx_https_nodeport: 30443 ```yaml ingress_nginx_use_daemonset: true ``` +## Официальные ресурсы + +- Официальный сайт: [https://kubernetes.github.io/ingress-nginx/](https://kubernetes.github.io/ingress-nginx/) +- Официальная документация: [https://kubernetes.github.io/ingress-nginx/user-guide/](https://kubernetes.github.io/ingress-nginx/user-guide/) +- Версии 
Helm chart / ПО: [https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx](https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx) diff --git a/addons/ingress-proxypass/README.md b/addons/ingress-proxypass/README.md index d03ebe5..443e9d6 100644 --- a/addons/ingress-proxypass/README.md +++ b/addons/ingress-proxypass/README.md @@ -723,3 +723,8 @@ make addon-ingress-proxypass ARGS="-e ingress_proxypass_vip=..." # с явны | `ingress_proxypass_proxies` | `[]` | Список определений внешних сервисов | | `ingress_proxypass_defaults.*` | см. defaults | Глобальные значения по умолчанию | | `ingress_proxypass_vip` | `""` | kube-vip VIP — отображается в сводке после установки | +## Официальные ресурсы + +- Официальный сайт: [https://kubernetes.io/docs/concepts/services-networking/ingress/](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- Официальная документация: [https://kubernetes.io/docs/concepts/services-networking/ingress/](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- Версии Helm chart / ПО: [https://kubernetes.github.io/ingress-nginx/deploy/](https://kubernetes.github.io/ingress-nginx/deploy/) diff --git a/addons/istio/README.md b/addons/istio/README.md index 945cba5..d623e49 100644 --- a/addons/istio/README.md +++ b/addons/istio/README.md @@ -151,3 +151,8 @@ istioctl proxy-status istioctl analyze -n my-app kubectl logs -n istio-system deployment/istiod -f ``` +## Официальные ресурсы + +- Официальный сайт: [https://istio.io/](https://istio.io/) +- Официальная документация: [https://istio.io/latest/docs/](https://istio.io/latest/docs/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/istio-official/base](https://artifacthub.io/packages/helm/istio-official/base) diff --git a/addons/jenkins/README.md b/addons/jenkins/README.md index 9bac80d..40073a7 100644 --- a/addons/jenkins/README.md +++ b/addons/jenkins/README.md @@ -170,3 +170,8 @@ SMTP настройки: Jenkins → Manage → Configure System → 
Extended E- kubectl logs -n jenkins statefulset/jenkins -f kubectl get pods -n jenkins ``` +## Официальные ресурсы + +- Официальный сайт: [https://www.jenkins.io/](https://www.jenkins.io/) +- Официальная документация: [https://www.jenkins.io/doc/](https://www.jenkins.io/doc/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/jenkinsci/jenkins](https://artifacthub.io/packages/helm/jenkinsci/jenkins) diff --git a/addons/kafka-ui/README.md b/addons/kafka-ui/README.md new file mode 100644 index 0000000..cf9abb7 --- /dev/null +++ b/addons/kafka-ui/README.md @@ -0,0 +1,37 @@ +# kafka-ui + +Аддон устанавливает Kafka UI с формой входа (логин/пароль) для доступа к интерфейсу. + +## Что делает + +- устанавливает Kafka UI через Helm; +- настраивает подключение к Kafka bootstrap server; +- включает авторизацию (`LOGIN_FORM`); +- публикует интерфейс через Ingress. + +## Конфигурация + +```yaml +# group_vars/all/addons.yml +addon_kafka_ui: true +kafka_ui_ingress_host: "kafka-ui.home.local" +kafka_ui_bootstrap_servers: "kafka.kafka.svc.cluster.local:9092" +kafka_ui_auth_username: "admin" +``` + +```yaml +# group_vars/all/vault.yml +vault_kafka_ui_password: "очень-сложный-пароль" +``` + +Установка: + +```bash +make addon-kafka-ui +``` + +## Официальные ресурсы + +- Официальный сайт: [https://github.com/provectus/kafka-ui](https://github.com/provectus/kafka-ui) +- Официальная документация: [https://github.com/provectus/kafka-ui/blob/master/documentation/compose/kafka-ui.yaml](https://github.com/provectus/kafka-ui/blob/master/documentation/compose/kafka-ui.yaml) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/kafka-ui/kafka-ui](https://artifacthub.io/packages/helm/kafka-ui/kafka-ui) diff --git a/addons/kafka-ui/playbook.yml b/addons/kafka-ui/playbook.yml new file mode 100644 index 0000000..03facf0 --- /dev/null +++ b/addons/kafka-ui/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install Kafka UI + hosts: k3s_master[0] + gather_facts: false + become: true 
+ roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/kafka-ui/role/defaults/main.yml b/addons/kafka-ui/role/defaults/main.yml new file mode 100644 index 0000000..709c6ae --- /dev/null +++ b/addons/kafka-ui/role/defaults/main.yml @@ -0,0 +1,26 @@ +--- +# Версия Helm-чарта Kafka UI +kafka_ui_version: "0.7.6" +# Namespace +kafka_ui_namespace: "kafka-ui" +# Helm-репозиторий Kafka UI +kafka_ui_chart_repo: "https://provectus.github.io/kafka-ui-charts" + +# Включить Ingress +kafka_ui_ingress_enabled: true +# Хост ingress +kafka_ui_ingress_host: "kafka-ui.example.com" +# IngressClass +kafka_ui_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}" +# TLS на ingress +kafka_ui_ingress_tls: false + +# Имя Kafka-кластера в UI +kafka_ui_cluster_name: "k3s-kafka" +# Bootstrap server Kafka +kafka_ui_bootstrap_servers: "kafka.kafka.svc.cluster.local:9092" + +# Логин для входа в UI +kafka_ui_auth_username: "admin" +# Пароль для входа в UI +kafka_ui_auth_password: "{{ vault_kafka_ui_password | default('changeme-kafka-ui') }}" diff --git a/addons/kafka-ui/role/molecule/default/converge.yml b/addons/kafka-ui/role/molecule/default/converge.yml new file mode 100644 index 0000000..2e048ab --- /dev/null +++ b/addons/kafka-ui/role/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge kafka-ui defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.copy: + dest: /tmp/kafka-ui-facts.yaml + mode: "0644" + content: | + kafka_ui_ingress_host: "kafka-ui.example.com" + kafka_ui_auth_username: "admin" diff --git a/addons/kafka-ui/role/molecule/default/molecule.yml b/addons/kafka-ui/role/molecule/default/molecule.yml new file mode 100644 index 0000000..561d789 --- /dev/null +++ b/addons/kafka-ui/role/molecule/default/molecule.yml @@ -0,0 +1,14 @@ +--- +driver: + name: docker +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true +provisioner: + name: ansible + playbooks: + converge: 
converge.yml + verify: verify.yml +verifier: + name: ansible diff --git a/addons/kafka-ui/role/molecule/default/verify.yml b/addons/kafka-ui/role/molecule/default/verify.yml new file mode 100644 index 0000000..407225c --- /dev/null +++ b/addons/kafka-ui/role/molecule/default/verify.yml @@ -0,0 +1,14 @@ +--- +- name: Verify kafka-ui defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.slurp: + src: /tmp/kafka-ui-facts.yaml + register: facts_raw + - ansible.builtin.set_fact: + v: "{{ facts_raw.content | b64decode | from_yaml }}" + - ansible.builtin.assert: + that: + - v.kafka_ui_ingress_host == "kafka-ui.example.com" + - v.kafka_ui_auth_username == "admin" diff --git a/addons/kafka-ui/role/tasks/main.yml b/addons/kafka-ui/role/tasks/main.yml new file mode 100644 index 0000000..d39e0fe --- /dev/null +++ b/addons/kafka-ui/role/tasks/main.yml @@ -0,0 +1,27 @@ +--- +- name: Add Kafka UI Helm repo + kubernetes.core.helm_repository: + name: kafka-ui + repo_url: "{{ kafka_ui_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Template Kafka UI values + ansible.builtin.template: + src: kafka-ui-values.yaml.j2 + dest: /tmp/kafka-ui-values.yaml + mode: "0644" + +- name: Install Kafka UI via Helm + kubernetes.core.helm: + name: kafka-ui + chart_ref: kafka-ui/kafka-ui + chart_version: "{{ kafka_ui_version }}" + release_namespace: "{{ kafka_ui_namespace }}" + create_namespace: true + wait: true + timeout: "10m0s" + values_files: + - /tmp/kafka-ui-values.yaml + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" diff --git a/addons/kafka-ui/role/templates/kafka-ui-values.yaml.j2 b/addons/kafka-ui/role/templates/kafka-ui-values.yaml.j2 new file mode 100644 index 0000000..a0af113 --- /dev/null +++ b/addons/kafka-ui/role/templates/kafka-ui-values.yaml.j2 @@ -0,0 +1,25 @@ +yamlApplicationConfig: + auth: + type: LOGIN_FORM + spring: + security: + user: + name: "{{ kafka_ui_auth_username }}" + password: "{{ kafka_ui_auth_password }}" 
+ kafka: + clusters: + - name: "{{ kafka_ui_cluster_name }}" + bootstrapServers: "{{ kafka_ui_bootstrap_servers }}" + +envs: + config: + AUTH_TYPE: "LOGIN_FORM" + SPRING_SECURITY_USER_NAME: "{{ kafka_ui_auth_username }}" + SPRING_SECURITY_USER_PASSWORD: "{{ kafka_ui_auth_password }}" + +ingress: + enabled: {{ kafka_ui_ingress_enabled | bool | lower }} + ingressClassName: "{{ kafka_ui_ingress_class }}" + host: "{{ kafka_ui_ingress_host }}" + tls: + enabled: {{ kafka_ui_ingress_tls | bool | lower }} diff --git a/addons/kafka/README.md b/addons/kafka/README.md new file mode 100644 index 0000000..8eb36b0 --- /dev/null +++ b/addons/kafka/README.md @@ -0,0 +1,30 @@ +# kafka + +Аддон устанавливает Apache Kafka в Kubernetes в режимах `standalone` или `cluster` (KRaft, без ZooKeeper), с PVC. + +```yaml +addon_kafka: true +kafka_mode: "standalone" # standalone | cluster +kafka_storage_size: "20Gi" +``` + +Для кластерного режима: + +```yaml +kafka_mode: "cluster" +kafka_controller_replica_count: 3 +kafka_broker_replica_count: 3 +``` + +Установка: + +```bash +make addon-kafka +``` + +## Официальные ресурсы + +- Официальный сайт: [https://kafka.apache.org/](https://kafka.apache.org/) +- Официальная документация: [https://kafka.apache.org/documentation/](https://kafka.apache.org/documentation/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/kafka](https://artifacthub.io/packages/helm/bitnami/kafka) + diff --git a/addons/kafka/playbook.yml b/addons/kafka/playbook.yml new file mode 100644 index 0000000..ac555bd --- /dev/null +++ b/addons/kafka/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install Kafka + hosts: k3s_master[0] + gather_facts: false + become: true + roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/kafka/role/defaults/main.yml b/addons/kafka/role/defaults/main.yml new file mode 100644 index 0000000..70aeb2f --- /dev/null +++ b/addons/kafka/role/defaults/main.yml @@ -0,0 +1,29 @@ +--- +# Версия чарта Kafka +kafka_version: "30.1.8" 
+# Namespace +kafka_namespace: "kafka" +# Репозиторий Bitnami +kafka_chart_repo: "https://charts.bitnami.com/bitnami" + +# Режим: standalone | cluster +kafka_mode: "standalone" + +# StorageClass +kafka_storage_class: "" +# Размер PVC +kafka_storage_size: "20Gi" + +# Включить клиентскую аутентификацию SASL/SCRAM +kafka_auth_enabled: false +# Пароли клиентов Kafka +kafka_client_passwords: + - "{{ vault_kafka_client_password | default('changeme-kafka-client') }}" + +# Реплики контроллеров KRaft (для cluster) +kafka_controller_replica_count: 3 +# Реплики брокеров (для cluster) +kafka_broker_replica_count: 3 + +# Метрики +kafka_metrics_enabled: true diff --git a/addons/kafka/role/molecule/default/converge.yml b/addons/kafka/role/molecule/default/converge.yml new file mode 100644 index 0000000..292c7cf --- /dev/null +++ b/addons/kafka/role/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: Converge kafka defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.copy: + dest: /tmp/kafka-facts.yaml + mode: "0644" + content: | + kafka_mode: "standalone" diff --git a/addons/kafka/role/molecule/default/molecule.yml b/addons/kafka/role/molecule/default/molecule.yml new file mode 100644 index 0000000..561d789 --- /dev/null +++ b/addons/kafka/role/molecule/default/molecule.yml @@ -0,0 +1,14 @@ +--- +driver: + name: docker +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true +provisioner: + name: ansible + playbooks: + converge: converge.yml + verify: verify.yml +verifier: + name: ansible diff --git a/addons/kafka/role/molecule/default/verify.yml b/addons/kafka/role/molecule/default/verify.yml new file mode 100644 index 0000000..9ce569f --- /dev/null +++ b/addons/kafka/role/molecule/default/verify.yml @@ -0,0 +1,13 @@ +--- +- name: Verify kafka defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.slurp: + src: /tmp/kafka-facts.yaml + register: facts_raw + - 
ansible.builtin.set_fact: + v: "{{ facts_raw.content | b64decode | from_yaml }}" + - ansible.builtin.assert: + that: + - v.kafka_mode == "standalone" diff --git a/addons/kafka/role/tasks/main.yml b/addons/kafka/role/tasks/main.yml new file mode 100644 index 0000000..b3b8ce0 --- /dev/null +++ b/addons/kafka/role/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: Add Bitnami Helm repo + kubernetes.core.helm_repository: + name: bitnami + repo_url: "{{ kafka_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Deploy Kafka via Helm + kubernetes.core.helm: + name: kafka + chart_ref: bitnami/kafka + chart_version: "{{ kafka_version }}" + release_namespace: "{{ kafka_namespace }}" + create_namespace: true + wait: true + timeout: "15m0s" + values: + kraft: + enabled: true + zookeeper: + enabled: false + controller: + replicaCount: "{{ kafka_controller_replica_count if kafka_mode == 'cluster' else 1 }}" + persistence: + enabled: true + size: "{{ kafka_storage_size }}" + storageClass: "{{ kafka_storage_class }}" + broker: + replicaCount: "{{ kafka_broker_replica_count if kafka_mode == 'cluster' else 1 }}" + persistence: + enabled: true + size: "{{ kafka_storage_size }}" + storageClass: "{{ kafka_storage_class }}" + listeners: + client: + protocol: "{{ 'SASL_PLAINTEXT' if kafka_auth_enabled else 'PLAINTEXT' }}" + sasl: + enabledMechanisms: "{{ 'SCRAM-SHA-256,SCRAM-SHA-512' if kafka_auth_enabled else '' }}" + client: + users: + - "app" + passwords: "{{ kafka_client_passwords if kafka_auth_enabled else [] }}" + metrics: + kafka: + enabled: "{{ kafka_metrics_enabled | bool }}" + jmx: + enabled: "{{ kafka_metrics_enabled | bool }}" + serviceMonitor: + enabled: "{{ kafka_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" diff --git a/addons/kubernetes-dashboard/README.md b/addons/kubernetes-dashboard/README.md index a303a75..100a104 100644 --- 
a/addons/kubernetes-dashboard/README.md +++ b/addons/kubernetes-dashboard/README.md @@ -57,3 +57,8 @@ kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy kubectl get pods -n kubernetes-dashboard kubectl logs -n kubernetes-dashboard deployment/kubernetes-dashboard-api -f ``` +## Официальные ресурсы + +- Официальный сайт: [https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) +- Официальная документация: [https://github.com/kubernetes/dashboard](https://github.com/kubernetes/dashboard) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/k8s-dashboard/kubernetes-dashboard](https://artifacthub.io/packages/helm/k8s-dashboard/kubernetes-dashboard) diff --git a/addons/loki/README.md b/addons/loki/README.md index 4fe9663..41896e9 100644 --- a/addons/loki/README.md +++ b/addons/loki/README.md @@ -80,3 +80,8 @@ kubectl get pods -n loki # Проверить что Promtail отправляет логи: kubectl logs -n promtail daemonset/promtail -f ``` +## Официальные ресурсы + +- Официальный сайт: [https://grafana.com/oss/loki/](https://grafana.com/oss/loki/) +- Официальная документация: [https://grafana.com/docs/loki/latest/](https://grafana.com/docs/loki/latest/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/grafana/loki](https://artifacthub.io/packages/helm/grafana/loki) diff --git a/addons/longhorn/README.md b/addons/longhorn/README.md index bf5ff89..7256c19 100644 --- a/addons/longhorn/README.md +++ b/addons/longhorn/README.md @@ -92,3 +92,8 @@ kubectl get pv,pvc -A # Longhorn API: kubectl -n longhorn-system port-forward svc/longhorn-frontend 8080:80 ``` +## Официальные ресурсы + +- Официальный сайт: [https://longhorn.io/](https://longhorn.io/) +- Официальная документация: [https://longhorn.io/docs/](https://longhorn.io/docs/) +- Версии Helm chart / ПО: 
[https://artifacthub.io/packages/helm/longhorn/longhorn](https://artifacthub.io/packages/helm/longhorn/longhorn) diff --git a/addons/mediaserver/README.md b/addons/mediaserver/README.md index 7197150..ad8732a 100644 --- a/addons/mediaserver/README.md +++ b/addons/mediaserver/README.md @@ -450,3 +450,8 @@ kubectl -n mediaserver delete secret hysteria2-config # Удалить namespace (удалит и PVC если нет ReclaimPolicy: Retain) kubectl delete namespace mediaserver ``` +## Официальные ресурсы + +- Официальный сайт: [https://www.plex.tv/](https://www.plex.tv/) +- Официальная документация: [https://docs.linuxserver.io/](https://docs.linuxserver.io/) +- Версии Helm chart / ПО: [https://docs.linuxserver.io/images/docker-plex/#versions](https://docs.linuxserver.io/images/docker-plex/#versions) diff --git a/addons/metrics-server/README.md b/addons/metrics-server/README.md index feb7e0e..64200f8 100644 --- a/addons/metrics-server/README.md +++ b/addons/metrics-server/README.md @@ -71,3 +71,8 @@ spec: updatePolicy: updateMode: Auto ``` +## Официальные ресурсы + +- Официальный сайт: [https://github.com/kubernetes-sigs/metrics-server](https://github.com/kubernetes-sigs/metrics-server) +- Официальная документация: [https://github.com/kubernetes-sigs/metrics-server](https://github.com/kubernetes-sigs/metrics-server) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/metrics-server/metrics-server](https://artifacthub.io/packages/helm/metrics-server/metrics-server) diff --git a/addons/minio/README.md b/addons/minio/README.md index 848bc1b..8668129 100644 --- a/addons/minio/README.md +++ b/addons/minio/README.md @@ -110,3 +110,8 @@ MinIO автоматически используется при включен - `addon_velero: true` — S3 backend для бэкапов кластера - `addon_csi_s3: true` — S3 как PVC - `addon_databasus: true` — S3 для бэкапов БД +## Официальные ресурсы + +- Официальный сайт: [https://min.io/](https://min.io/) +- Официальная документация: [https://min.io/docs/](https://min.io/docs/) +- 
Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/minio](https://artifacthub.io/packages/helm/bitnami/minio) diff --git a/addons/mongodb/README.md b/addons/mongodb/README.md new file mode 100644 index 0000000..e4af6c9 --- /dev/null +++ b/addons/mongodb/README.md @@ -0,0 +1,28 @@ +# mongodb + +Аддон устанавливает MongoDB в Kubernetes в режимах `standalone` или `replicaset`, с постоянным хранилищем (PVC). + +```yaml +addon_mongodb: true +mongodb_architecture: "standalone" # standalone | replicaset +mongodb_storage_size: "8Gi" +``` + +Для кластера (replicaset): + +```yaml +mongodb_architecture: "replicaset" +mongodb_replica_count: 3 +``` + +Установка: + +```bash +make addon-mongodb +``` + +## Официальные ресурсы + +- Официальный сайт: [https://www.mongodb.com/](https://www.mongodb.com/) +- Официальная документация: [https://www.mongodb.com/docs/](https://www.mongodb.com/docs/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/mongodb](https://artifacthub.io/packages/helm/bitnami/mongodb) diff --git a/addons/mongodb/playbook.yml b/addons/mongodb/playbook.yml new file mode 100644 index 0000000..5032b02 --- /dev/null +++ b/addons/mongodb/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install MongoDB + hosts: k3s_master[0] + gather_facts: false + become: true + roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/mongodb/role/defaults/main.yml b/addons/mongodb/role/defaults/main.yml new file mode 100644 index 0000000..1e30113 --- /dev/null +++ b/addons/mongodb/role/defaults/main.yml @@ -0,0 +1,33 @@ +--- +# Версия чарта MongoDB +mongodb_version: "15.6.21" +# Namespace +mongodb_namespace: "mongodb" +# Репозиторий Bitnami +mongodb_chart_repo: "https://charts.bitnami.com/bitnami" + +# Архитектура: standalone | replicaset +mongodb_architecture: "standalone" + +# Включить аутентификацию +mongodb_auth_enabled: true +# Root пользователь +mongodb_root_user: "root" +# Root пароль +mongodb_root_password: "{{ 
vault_mongodb_root_password | default('changeme-mongodb-root') }}" +# Прикладной пользователь +mongodb_username: "appuser" +# Пароль прикладного пользователя +mongodb_password: "{{ vault_mongodb_password | default('changeme-mongodb-app') }}" +# База приложения +mongodb_database: "appdb" + +# StorageClass +mongodb_storage_class: "" +# Размер PVC +mongodb_storage_size: "8Gi" +# Число реплик в replicaset +mongodb_replica_count: 3 + +# Метрики +mongodb_metrics_enabled: true diff --git a/addons/mongodb/role/molecule/default/converge.yml b/addons/mongodb/role/molecule/default/converge.yml new file mode 100644 index 0000000..faa4756 --- /dev/null +++ b/addons/mongodb/role/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: Converge mongodb defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.copy: + dest: /tmp/mongodb-facts.yaml + mode: "0644" + content: | + mongodb_architecture: "standalone" diff --git a/addons/mongodb/role/molecule/default/molecule.yml b/addons/mongodb/role/molecule/default/molecule.yml new file mode 100644 index 0000000..561d789 --- /dev/null +++ b/addons/mongodb/role/molecule/default/molecule.yml @@ -0,0 +1,14 @@ +--- +driver: + name: docker +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true +provisioner: + name: ansible + playbooks: + converge: converge.yml + verify: verify.yml +verifier: + name: ansible diff --git a/addons/mongodb/role/molecule/default/verify.yml b/addons/mongodb/role/molecule/default/verify.yml new file mode 100644 index 0000000..f1dd1b9 --- /dev/null +++ b/addons/mongodb/role/molecule/default/verify.yml @@ -0,0 +1,13 @@ +--- +- name: Verify mongodb defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.slurp: + src: /tmp/mongodb-facts.yaml + register: facts_raw + - ansible.builtin.set_fact: + v: "{{ facts_raw.content | b64decode | from_yaml }}" + - ansible.builtin.assert: + that: + - v.mongodb_architecture == "standalone" diff 
--git a/addons/mongodb/role/tasks/main.yml b/addons/mongodb/role/tasks/main.yml new file mode 100644 index 0000000..e595f28 --- /dev/null +++ b/addons/mongodb/role/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: Add Bitnami Helm repo + kubernetes.core.helm_repository: + name: bitnami + repo_url: "{{ mongodb_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Deploy MongoDB via Helm + kubernetes.core.helm: + name: mongodb + chart_ref: bitnami/mongodb + chart_version: "{{ mongodb_version }}" + release_namespace: "{{ mongodb_namespace }}" + create_namespace: true + wait: true + timeout: "10m0s" + values: + architecture: "{{ mongodb_architecture }}" + auth: + enabled: "{{ mongodb_auth_enabled | bool }}" + rootUser: "{{ mongodb_root_user }}" + rootPassword: "{{ mongodb_root_password }}" + usernames: + - "{{ mongodb_username }}" + passwords: + - "{{ mongodb_password }}" + databases: + - "{{ mongodb_database }}" + replicaCount: "{{ mongodb_replica_count if mongodb_architecture == 'replicaset' else 1 }}" + persistence: + enabled: true + size: "{{ mongodb_storage_size }}" + storageClass: "{{ mongodb_storage_class }}" + metrics: + enabled: "{{ mongodb_metrics_enabled | bool }}" + serviceMonitor: + enabled: "{{ mongodb_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" diff --git a/addons/mysql/README.md b/addons/mysql/README.md index 7624457..cc3c8a1 100644 --- a/addons/mysql/README.md +++ b/addons/mysql/README.md @@ -78,3 +78,8 @@ kubectl exec -i -n mysql statefulset/mysql-primary -- \ ## Использование с Databasus При `addon_databasus: true` MySQL автоматически добавляется как источник для резервного копирования. 
+## Официальные ресурсы + +- Официальный сайт: [https://www.mysql.com/](https://www.mysql.com/) +- Официальная документация: [https://dev.mysql.com/doc/](https://dev.mysql.com/doc/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/mysql](https://artifacthub.io/packages/helm/bitnami/mysql) diff --git a/addons/netbird/README.md b/addons/netbird/README.md index 53dbc38..61c4d0b 100644 --- a/addons/netbird/README.md +++ b/addons/netbird/README.md @@ -111,3 +111,8 @@ kubectl logs -n netbird deployment/netbird-management -f kubectl logs -n netbird deployment/netbird-signal -f kubectl logs -n netbird deployment/netbird-coturn -f ``` +## Официальные ресурсы + +- Официальный сайт: [https://netbird.io/](https://netbird.io/) +- Официальная документация: [https://docs.netbird.io/](https://docs.netbird.io/) +- Версии Helm chart / ПО: [https://github.com/netbirdio/netbird/releases](https://github.com/netbirdio/netbird/releases) diff --git a/addons/nextcloud/README.md b/addons/nextcloud/README.md index e2a02df..43bfefc 100644 --- a/addons/nextcloud/README.md +++ b/addons/nextcloud/README.md @@ -33,7 +33,12 @@ make addon-nextcloud ## Интеграция с PostgreSQL -При `addon_postgresql: true` создаётся отдельная БД `nextcloud`. +Выбор задаётся переменной `nextcloud_database_mode`: +- `auto` — внешняя PostgreSQL при `addon_postgresql: true`, иначе встроенная SQLite; +- `sqlite` — всегда встроенная SQLite; +- `external_postgresql` — всегда внешняя PostgreSQL (аддон `postgresql` или совместимый внешний сервис). + +По умолчанию в аддоне стоит `nextcloud_database_mode: external_postgresql` (приоритет PostgreSQL). 
## Подключение клиентов @@ -80,3 +85,8 @@ kubectl exec -n nextcloud deployment/nextcloud -- \ kubectl logs -n nextcloud deployment/nextcloud -f kubectl exec -n nextcloud deployment/nextcloud -- php occ check ``` +## Официальные ресурсы + +- Официальный сайт: [https://nextcloud.com/](https://nextcloud.com/) +- Официальная документация: [https://docs.nextcloud.com/](https://docs.nextcloud.com/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/nextcloud/nextcloud](https://artifacthub.io/packages/helm/nextcloud/nextcloud) diff --git a/addons/nextcloud/role/defaults/main.yml b/addons/nextcloud/role/defaults/main.yml index 4f2ed19..92b3846 100644 --- a/addons/nextcloud/role/defaults/main.yml +++ b/addons/nextcloud/role/defaults/main.yml @@ -31,8 +31,11 @@ nextcloud_storage_size: "20Gi" nextcloud_storage_class: "" # База данных -# При addon_postgresql: true — создаётся отдельный user/db в shared PostgreSQL -# При addon_postgresql: false — встроенный SQLite (не рекомендуется для prod) +# Режим БД Nextcloud: +# auto — внешняя PostgreSQL при addon_postgresql=true, иначе встроенная SQLite +# sqlite — всегда встроенная SQLite +# external_postgresql — всегда внешняя PostgreSQL +nextcloud_database_mode: "external_postgresql" # auto | sqlite | external_postgresql # Имя БД nextcloud_db_name: "nextcloud" # Пользователь БД diff --git a/addons/nextcloud/role/tasks/main.yml b/addons/nextcloud/role/tasks/main.yml index a8925c0..8771a9c 100644 --- a/addons/nextcloud/role/tasks/main.yml +++ b/addons/nextcloud/role/tasks/main.yml @@ -24,6 +24,15 @@ ansible.builtin.debug: msg: "Устанавливаю Nextcloud chart {{ _nextcloud_chart_version }}" +- name: Resolve Nextcloud database mode + ansible.builtin.set_fact: + _nextcloud_use_external_postgresql: >- + {{ + (nextcloud_database_mode == 'external_postgresql') + or + (nextcloud_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) + }} + - name: Create dedicated PostgreSQL user and database for Nextcloud 
kubernetes.core.k8s: state: present @@ -71,7 +80,7 @@ value: "{{ nextcloud_db_name }}" environment: KUBECONFIG: "{{ k3s_kubeconfig_path }}" - when: addon_postgresql | default(false) | bool + when: _nextcloud_use_external_postgresql | bool - name: Wait for Nextcloud PostgreSQL provision Job to complete ansible.builtin.command: > @@ -79,7 +88,7 @@ wait job/nextcloud-pg-provision --for=condition=complete --timeout=120s changed_when: false - when: addon_postgresql | default(false) | bool + when: _nextcloud_use_external_postgresql | bool - name: Template Nextcloud values ansible.builtin.template: @@ -116,5 +125,5 @@ - "URL: http{{ 's' if nextcloud_ingress_tls else '' }}://{{ nextcloud_ingress_host }}" - "Логин: {{ nextcloud_admin_username }}" - "Пароль: {{ nextcloud_admin_password }}" - - "БД: {{ 'PostgreSQL ' + postgresql_external_host if addon_postgresql | default(false) | bool else 'встроенный SQLite (только для тестов!)' }}" + - "БД: {{ 'PostgreSQL ' + postgresql_external_host if _nextcloud_use_external_postgresql | bool else 'встроенный SQLite (только для тестов!)' }}" - "Для обновления до новой версии: make addon-nextcloud (nextcloud_version='' → автопоиск)" diff --git a/addons/nextcloud/role/templates/nextcloud-values.yaml.j2 b/addons/nextcloud/role/templates/nextcloud-values.yaml.j2 index 5f5736b..423aa46 100644 --- a/addons/nextcloud/role/templates/nextcloud-values.yaml.j2 +++ b/addons/nextcloud/role/templates/nextcloud-values.yaml.j2 @@ -27,7 +27,7 @@ persistence: storageClass: "{{ nextcloud_storage_class }}" {% endif %} -{% if addon_postgresql | default(false) | bool %} +{% if (nextcloud_database_mode == 'external_postgresql') or (nextcloud_database_mode == 'auto' and (addon_postgresql | default(false) | bool)) %} internalDatabase: enabled: false diff --git a/addons/nfs-server/README.md b/addons/nfs-server/README.md index 1cdedc6..d5fe88b 100644 --- a/addons/nfs-server/README.md +++ b/addons/nfs-server/README.md @@ -65,3 +65,8 @@ df -h | grep nfs kubectl get 
storageclass kubectl get pvc -A ``` +## Официальные ресурсы + +- Официальный сайт: [https://nfs.sourceforge.net/](https://nfs.sourceforge.net/) +- Официальная документация: [https://wiki.archlinux.org/title/NFS](https://wiki.archlinux.org/title/NFS) +- Версии Helm chart / ПО: [https://packages.ubuntu.com/search?keywords=nfs-kernel-server](https://packages.ubuntu.com/search?keywords=nfs-kernel-server) diff --git a/addons/owncloud/README.md b/addons/owncloud/README.md index f790f83..4be2139 100644 --- a/addons/owncloud/README.md +++ b/addons/owncloud/README.md @@ -69,3 +69,8 @@ kubectl exec -n owncloud deployment/owncloud -- \ kubectl get pods -n owncloud kubectl logs -n owncloud deployment/owncloud -f ``` +## Официальные ресурсы + +- Официальный сайт: [https://owncloud.com/](https://owncloud.com/) +- Официальная документация: [https://doc.owncloud.com/ocis/next/](https://doc.owncloud.com/ocis/next/) +- Версии Helm chart / ПО: [https://github.com/owncloud/ocis/releases](https://github.com/owncloud/ocis/releases) diff --git a/addons/postgresql/README.md b/addons/postgresql/README.md index 45485f4..17e2791 100644 --- a/addons/postgresql/README.md +++ b/addons/postgresql/README.md @@ -98,3 +98,8 @@ kubectl exec -i -n postgresql statefulset/postgresql-primary -- \ - `addon_harbor` — БД harbor - `addon_nextcloud` — БД nextcloud - `addon_databasus` — БД databasus +## Официальные ресурсы + +- Официальный сайт: [https://www.postgresql.org/](https://www.postgresql.org/) +- Официальная документация: [https://www.postgresql.org/docs/](https://www.postgresql.org/docs/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/postgresql](https://artifacthub.io/packages/helm/bitnami/postgresql) diff --git a/addons/prometheus-stack/README.md b/addons/prometheus-stack/README.md index 36e2d4a..2522b9f 100644 --- a/addons/prometheus-stack/README.md +++ b/addons/prometheus-stack/README.md @@ -142,3 +142,8 @@ 
sum(rate(nginx_ingress_controller_requests{status=~"5.."}[5m])) by (ingress) # Pod restarts increase(kube_pod_container_status_restarts_total[1h]) > 0 ``` +## Официальные ресурсы + +- Официальный сайт: [https://prometheus.io/](https://prometheus.io/) +- Официальная документация: [https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack](https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack) diff --git a/addons/promtail/README.md b/addons/promtail/README.md index 1e86da8..5a8f9a7 100644 --- a/addons/promtail/README.md +++ b/addons/promtail/README.md @@ -76,3 +76,8 @@ kubectl get pods -n promtail -o wide ``` В Grafana → Explore → Loki → выбери namespace → должны появиться логи. +## Официальные ресурсы + +- Официальный сайт: [https://grafana.com/docs/loki/latest/send-data/promtail/](https://grafana.com/docs/loki/latest/send-data/promtail/) +- Официальная документация: [https://grafana.com/docs/loki/latest/send-data/promtail/](https://grafana.com/docs/loki/latest/send-data/promtail/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/grafana/promtail](https://artifacthub.io/packages/helm/grafana/promtail) diff --git a/addons/pushgateway/README.md b/addons/pushgateway/README.md index b39d309..dcdecf0 100644 --- a/addons/pushgateway/README.md +++ b/addons/pushgateway/README.md @@ -97,3 +97,8 @@ EOF curl -X DELETE \ http://prometheus-pushgateway.monitoring.svc.cluster.local:9091/metrics/job/my-batch-job ``` +## Официальные ресурсы + +- Официальный сайт: [https://github.com/prometheus/pushgateway](https://github.com/prometheus/pushgateway) +- Официальная документация: [https://github.com/prometheus/pushgateway](https://github.com/prometheus/pushgateway) +- Версии Helm chart / ПО: 
[https://artifacthub.io/packages/helm/prometheus-community/prometheus-pushgateway](https://artifacthub.io/packages/helm/prometheus-community/prometheus-pushgateway) diff --git a/addons/rabbitmq/README.md b/addons/rabbitmq/README.md new file mode 100644 index 0000000..57889bd --- /dev/null +++ b/addons/rabbitmq/README.md @@ -0,0 +1,28 @@ +# rabbitmq + +Аддон устанавливает RabbitMQ в Kubernetes в режимах `standalone` или `cluster`, с PVC. + +```yaml +addon_rabbitmq: true +rabbitmq_mode: "standalone" # standalone | cluster +rabbitmq_storage_size: "8Gi" +``` + +Для кластерного режима: + +```yaml +rabbitmq_mode: "cluster" +rabbitmq_replica_count: 3 +``` + +Установка: + +```bash +make addon-rabbitmq +``` + +## Официальные ресурсы + +- Официальный сайт: [https://www.rabbitmq.com/](https://www.rabbitmq.com/) +- Официальная документация: [https://www.rabbitmq.com/documentation.html](https://www.rabbitmq.com/documentation.html) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/rabbitmq](https://artifacthub.io/packages/helm/bitnami/rabbitmq) diff --git a/addons/rabbitmq/playbook.yml b/addons/rabbitmq/playbook.yml new file mode 100644 index 0000000..4883c16 --- /dev/null +++ b/addons/rabbitmq/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install RabbitMQ + hosts: k3s_master[0] + gather_facts: false + become: true + roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/rabbitmq/role/defaults/main.yml b/addons/rabbitmq/role/defaults/main.yml new file mode 100644 index 0000000..4ef98a1 --- /dev/null +++ b/addons/rabbitmq/role/defaults/main.yml @@ -0,0 +1,26 @@ +--- +# Версия чарта RabbitMQ +rabbitmq_version: "15.5.1" +# Namespace +rabbitmq_namespace: "rabbitmq" +# Репозиторий Bitnami +rabbitmq_chart_repo: "https://charts.bitnami.com/bitnami" + +# Режим: standalone | cluster +rabbitmq_mode: "standalone" +# Реплики для cluster режима +rabbitmq_replica_count: 3 + +# Логин/пароль +rabbitmq_auth_username: "appuser" +rabbitmq_auth_password: "{{ 
vault_rabbitmq_password | default('changeme-rabbitmq') }}" +# Erlang cookie для кластера +rabbitmq_erlang_cookie: "{{ vault_rabbitmq_erlang_cookie | default('changeme-rabbitmq-cookie') }}" + +# StorageClass +rabbitmq_storage_class: "" +# Размер PVC +rabbitmq_storage_size: "8Gi" + +# Метрики +rabbitmq_metrics_enabled: true diff --git a/addons/rabbitmq/role/molecule/default/converge.yml b/addons/rabbitmq/role/molecule/default/converge.yml new file mode 100644 index 0000000..2f68285 --- /dev/null +++ b/addons/rabbitmq/role/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: Converge rabbitmq defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.copy: + dest: /tmp/rabbitmq-facts.yaml + mode: "0644" + content: | + rabbitmq_mode: "standalone" diff --git a/addons/rabbitmq/role/molecule/default/molecule.yml b/addons/rabbitmq/role/molecule/default/molecule.yml new file mode 100644 index 0000000..561d789 --- /dev/null +++ b/addons/rabbitmq/role/molecule/default/molecule.yml @@ -0,0 +1,14 @@ +--- +driver: + name: docker +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true +provisioner: + name: ansible + playbooks: + converge: converge.yml + verify: verify.yml +verifier: + name: ansible diff --git a/addons/rabbitmq/role/molecule/default/verify.yml b/addons/rabbitmq/role/molecule/default/verify.yml new file mode 100644 index 0000000..0246d7c --- /dev/null +++ b/addons/rabbitmq/role/molecule/default/verify.yml @@ -0,0 +1,13 @@ +--- +- name: Verify rabbitmq defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.slurp: + src: /tmp/rabbitmq-facts.yaml + register: facts_raw + - ansible.builtin.set_fact: + v: "{{ facts_raw.content | b64decode | from_yaml }}" + - ansible.builtin.assert: + that: + - v.rabbitmq_mode == "standalone" diff --git a/addons/rabbitmq/role/tasks/main.yml b/addons/rabbitmq/role/tasks/main.yml new file mode 100644 index 0000000..83bbe16 --- /dev/null +++ 
b/addons/rabbitmq/role/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: Add Bitnami Helm repo + kubernetes.core.helm_repository: + name: bitnami + repo_url: "{{ rabbitmq_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Deploy RabbitMQ via Helm + kubernetes.core.helm: + name: rabbitmq + chart_ref: bitnami/rabbitmq + chart_version: "{{ rabbitmq_version }}" + release_namespace: "{{ rabbitmq_namespace }}" + create_namespace: true + wait: true + timeout: "10m0s" + values: + replicaCount: "{{ rabbitmq_replica_count if rabbitmq_mode == 'cluster' else 1 }}" + auth: + username: "{{ rabbitmq_auth_username }}" + password: "{{ rabbitmq_auth_password }}" + erlangCookie: "{{ rabbitmq_erlang_cookie }}" + persistence: + enabled: true + size: "{{ rabbitmq_storage_size }}" + storageClass: "{{ rabbitmq_storage_class }}" + metrics: + enabled: "{{ rabbitmq_metrics_enabled | bool }}" + serviceMonitor: + enabled: "{{ rabbitmq_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" diff --git a/addons/redis/README.md b/addons/redis/README.md new file mode 100644 index 0000000..22c6e13 --- /dev/null +++ b/addons/redis/README.md @@ -0,0 +1,28 @@ +# redis + +Аддон устанавливает Redis в Kubernetes в режимах `standalone` или `replication` с PVC. 
+ +```yaml +addon_redis: true +redis_architecture: "standalone" # standalone | replication +redis_storage_size: "8Gi" +``` + +Для кластера (replication): + +```yaml +redis_architecture: "replication" +redis_replica_count: 2 +``` + +Установка: + +```bash +make addon-redis +``` + +## Официальные ресурсы + +- Официальный сайт: [https://redis.io/](https://redis.io/) +- Официальная документация: [https://redis.io/docs/latest/](https://redis.io/docs/latest/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/bitnami/redis](https://artifacthub.io/packages/helm/bitnami/redis) diff --git a/addons/redis/playbook.yml b/addons/redis/playbook.yml new file mode 100644 index 0000000..bbfa53e --- /dev/null +++ b/addons/redis/playbook.yml @@ -0,0 +1,7 @@ +--- +- name: Install Redis + hosts: k3s_master[0] + gather_facts: false + become: true + roles: + - role: "{{ playbook_dir }}/role" diff --git a/addons/redis/role/defaults/main.yml b/addons/redis/role/defaults/main.yml new file mode 100644 index 0000000..4aa38e4 --- /dev/null +++ b/addons/redis/role/defaults/main.yml @@ -0,0 +1,26 @@ +--- +# Версия чарта Redis +redis_version: "19.6.4" +# Namespace +redis_namespace: "redis" +# Репозиторий Bitnami +redis_chart_repo: "https://charts.bitnami.com/bitnami" + +# Архитектура: standalone | replication +redis_architecture: "standalone" + +# Включить пароль (AUTH) +redis_auth_enabled: true +# Пароль Redis +redis_auth_password: "{{ vault_redis_password | default('changeme-redis') }}" + +# StorageClass +redis_storage_class: "" +# Размер PVC +redis_storage_size: "8Gi" + +# Реплики Redis (используется для replication) +redis_replica_count: 2 + +# Включить метрики +redis_metrics_enabled: true diff --git a/addons/redis/role/molecule/default/converge.yml b/addons/redis/role/molecule/default/converge.yml new file mode 100644 index 0000000..f22849e --- /dev/null +++ b/addons/redis/role/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- +- name: Converge redis defaults + hosts: all + 
gather_facts: false + tasks: + - ansible.builtin.copy: + dest: /tmp/redis-facts.yaml + mode: "0644" + content: | + redis_architecture: "standalone" + redis_replica_count: "2" diff --git a/addons/redis/role/molecule/default/molecule.yml b/addons/redis/role/molecule/default/molecule.yml new file mode 100644 index 0000000..561d789 --- /dev/null +++ b/addons/redis/role/molecule/default/molecule.yml @@ -0,0 +1,14 @@ +--- +driver: + name: docker +platforms: + - name: master01 + image: geerlingguy/docker-ubuntu2204-ansible:latest + pre_build_image: true +provisioner: + name: ansible + playbooks: + converge: converge.yml + verify: verify.yml +verifier: + name: ansible diff --git a/addons/redis/role/molecule/default/verify.yml b/addons/redis/role/molecule/default/verify.yml new file mode 100644 index 0000000..4d30748 --- /dev/null +++ b/addons/redis/role/molecule/default/verify.yml @@ -0,0 +1,13 @@ +--- +- name: Verify redis defaults + hosts: all + gather_facts: false + tasks: + - ansible.builtin.slurp: + src: /tmp/redis-facts.yaml + register: facts_raw + - ansible.builtin.set_fact: + v: "{{ facts_raw.content | b64decode | from_yaml }}" + - ansible.builtin.assert: + that: + - v.redis_architecture == "standalone" diff --git a/addons/redis/role/tasks/main.yml b/addons/redis/role/tasks/main.yml new file mode 100644 index 0000000..487d266 --- /dev/null +++ b/addons/redis/role/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Add Bitnami Helm repo + kubernetes.core.helm_repository: + name: bitnami + repo_url: "{{ redis_chart_repo }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" + +- name: Deploy Redis via Helm + kubernetes.core.helm: + name: redis + chart_ref: bitnami/redis + chart_version: "{{ redis_version }}" + release_namespace: "{{ redis_namespace }}" + create_namespace: true + wait: true + timeout: "10m0s" + values: + architecture: "{{ redis_architecture }}" + auth: + enabled: "{{ redis_auth_enabled | bool }}" + password: "{{ redis_auth_password }}" + master: + 
persistence: + enabled: true + size: "{{ redis_storage_size }}" + storageClass: "{{ redis_storage_class }}" + replica: + replicaCount: "{{ redis_replica_count if redis_architecture == 'replication' else 0 }}" + persistence: + enabled: "{{ redis_architecture == 'replication' }}" + size: "{{ redis_storage_size }}" + storageClass: "{{ redis_storage_class }}" + metrics: + enabled: "{{ redis_metrics_enabled | bool }}" + serviceMonitor: + enabled: "{{ redis_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}" + environment: + KUBECONFIG: "{{ k3s_kubeconfig_path }}" diff --git a/addons/smtp-relay/README.md b/addons/smtp-relay/README.md index 9ec4801..8834214 100644 --- a/addons/smtp-relay/README.md +++ b/addons/smtp-relay/README.md @@ -118,3 +118,8 @@ kubectl logs -n smtp-relay deployment/smtp-relay -f kubectl run smtp-test --image=alpine --rm -it -- \ sh -c "apk add --no-cache ssmtp && echo 'Subject: test' | ssmtp user@example.com" ``` +## Официальные ресурсы + +- Официальный сайт: [https://www.postfix.org/](https://www.postfix.org/) +- Официальная документация: [https://www.postfix.org/documentation.html](https://www.postfix.org/documentation.html) +- Версии Helm chart / ПО: [https://hub.docker.com/_/postfix](https://hub.docker.com/_/postfix) diff --git a/addons/splitgw/README.md b/addons/splitgw/README.md index 1957260..d92b11b 100644 --- a/addons/splitgw/README.md +++ b/addons/splitgw/README.md @@ -526,3 +526,8 @@ iptables -t mangle -X SPLITGW ip rule del fwmark 0x1 lookup 100 ip route flush table 100 ``` +## Официальные ресурсы + +- Официальный сайт: [https://sing-box.sagernet.org/](https://sing-box.sagernet.org/) +- Официальная документация: [https://sing-box.sagernet.org/configuration/](https://sing-box.sagernet.org/configuration/) +- Версии Helm chart / ПО: [https://github.com/SagerNet/sing-box/releases](https://github.com/SagerNet/sing-box/releases) diff --git a/addons/technitium-dns/README.md b/addons/technitium-dns/README.md index 
392954d..e879771 100644 --- a/addons/technitium-dns/README.md +++ b/addons/technitium-dns/README.md @@ -1,173 +1,63 @@ # technitium-dns -Highly-available internal DNS based on [Technitium DNS Server](https://technitium.com/dns/). +Аддон разворачивает внутренний DNS на базе Technitium DNS Server (Primary/Secondary) для домашнего или лабораторного Kubernetes-кластера. -Deploys Primary + optional Secondary instance, each behind a **kube-vip** `LoadBalancer` service with a static IP. A `CronJob` syncs all Primary zones to Secondary automatically every 5 minutes via the Technitium REST API. +## Что делает аддон -## Architecture +- поднимает primary DNS-инстанс; +- опционально поднимает secondary DNS-инстанс; +- публикует DNS через `LoadBalancer` IP (kube-vip); +- настраивает синхронизацию зон между primary и secondary; +- может дополнительно развернуть ExternalDNS для автозаписи DNS из Ingress/Service. -``` -Clients (Keenetic / DHCP) - │ - ├─ DNS 192.168.1.53 → technitium-dns-primary (Deployment, RWO PVC) - └─ DNS 192.168.1.54 → technitium-dns-secondary (Deployment, RWO PVC) +## Быстрый старт -CronJob sync (*/5 min): primary REST API → list zones → create missing Secondary zones on secondary → forceSync - -Web UI (Ingress): - http://dns.home.local → primary :5380 - http://dns-secondary.home.local → secondary :5380 - -ExternalDNS (optional, disabled by default): - Watches Ingress/Service → RFC 2136 DDNS → primary → AXFR → secondary -``` - -## Quick start - -### 1. Set vault password +1. Включите аддон в `group_vars/all/addons.yml`: ```yaml -# group_vars/all/vault.yml (encrypted with ansible-vault) -technitium_dns_admin_password: "your-strong-password" -``` - -### 2. 
Enable and configure - -```yaml -# group_vars/all/addons.yml addon_technitium_dns: true - -technitium_dns_primary_ip: "192.168.1.53" # kube-vip managed IP +technitium_dns_primary_ip: "192.168.1.53" technitium_dns_secondary_ip: "192.168.1.54" -technitium_dns_domain: "home.local" +technitium_dns_domain: "home.local" technitium_dns_primary_host: "dns.home.local" technitium_dns_secondary_host: "dns-secondary.home.local" ``` -### 3. Deploy +2. Добавьте пароль администратора в `group_vars/all/vault.yml`: + +```yaml +technitium_dns_admin_password: "сильный-пароль" +``` + +3. Установите аддон: ```bash make addon-technitium-dns -# or: -ansible-playbook playbooks/addons.yml --tags technitium-dns ``` -### 4. Create the internal zone (first time only) +## Настройка DNS-клиентов -Open `http://dns.home.local/` → login as `admin` → **Zones → Add Zone → Primary** → enter `home.local`. +Укажите DNS-серверы на роутере/DHCP: -Then add `A` records for your services under `home.local`. +- primary: `192.168.1.53` +- secondary: `192.168.1.54` ---- - -## Keenetic router — DNS configuration - -In Keenetic web interface: **Internet → DNS servers** - -| Field | Value | -|-------|-------| -| Primary DNS | `192.168.1.53` | -| Secondary DNS | `192.168.1.54` | - -Or via Keenetic CLI: -``` -ip name-server 192.168.1.53 -ip name-server 192.168.1.54 -``` - ---- - -## Zone sync (Primary → Secondary) - -The `technitium-dns-sync` `CronJob` runs every 5 minutes. It: - -1. Logs in to both instances with the shared admin password. -2. Lists all `Primary` and `Forwarder` zones on primary. -3. Creates missing zones on secondary as `Secondary` type pointing to `primary.ip`. -4. Calls `forceSyncZone` for every zone. 
- -Manual trigger: -```bash -kubectl create job --from=cronjob/technitium-dns-sync sync-manual-1 \ - -n technitium-dns -kubectl -n technitium-dns logs -l app.kubernetes.io/component=sync -f -``` - ---- - -## ExternalDNS (optional) - -Automatically creates DNS records on primary from `Ingress` and `Service` resources via **RFC 2136 DDNS**. Secondary picks up changes via the sync CronJob. - -### Enable - -```yaml -# group_vars/all/addons.yml -technitium_dns_externaldns_enabled: true -technitium_dns_externaldns_domain_filter: - - "home.local" -technitium_dns_externaldns_policy: "upsert-only" # or "sync" to also delete -``` - -### Enable DDNS on zones in Technitium - -For each zone that ExternalDNS should write to: - -1. Open Web UI → Zones → `home.local` → **Zone Settings** -2. **Dynamic Updates** → set to `Allow` (or `Allow Signed` for TSIG) -3. Save. - ---- - -## Variables reference - -| Variable | Default | Description | -|----------|---------|-------------| -| `technitium_dns_primary_ip` | `192.168.1.53` | kube-vip LB IP for primary | -| `technitium_dns_secondary_enabled` | `true` | Deploy secondary instance | -| `technitium_dns_secondary_ip` | `192.168.1.54` | kube-vip LB IP for secondary | -| `technitium_dns_primary_node` | `""` | Pin primary to node hostname | -| `technitium_dns_secondary_node` | `""` | Pin secondary to node hostname | -| `technitium_dns_domain` | `home.local` | Local DNS domain | -| `technitium_dns_forwarders` | `[1.1.1.1, 8.8.8.8]` | Upstream resolvers | -| `technitium_dns_recursion` | `AllowOnlyForPrivateNetworks` | Recursion mode | -| `technitium_dns_admin_password` | — | **In vault.yml** — admin password | -| `technitium_dns_storage_class` | `""` | StorageClass (empty = cluster default) | -| `technitium_dns_storage_size` | `1Gi` | PVC size per instance | -| `technitium_dns_ingress_enabled` | `true` | Expose Web UI via Ingress | -| `technitium_dns_primary_host` | `dns.home.local` | Primary Web UI hostname | -| 
`technitium_dns_secondary_host` | `dns-secondary.home.local` | Secondary Web UI hostname | -| `technitium_dns_sync_enabled` | `true` | Enable zone sync CronJob | -| `technitium_dns_sync_schedule` | `*/5 * * * *` | Sync frequency | -| `technitium_dns_externaldns_enabled` | `false` | Deploy ExternalDNS | -| `technitium_dns_externaldns_policy` | `upsert-only` | ExternalDNS sync policy | - ---- - -## Troubleshooting - -**DNS not resolving after deploy** +## Проверка ```bash -# Check pods are Running kubectl -n technitium-dns get pods - -# Test DNS resolution from a pod kubectl run dnstest --rm -it --image=busybox -- nslookup kubernetes.default 192.168.1.53 ``` -**Sync job failing** +## Полезные параметры -```bash -kubectl -n technitium-dns logs -l app.kubernetes.io/component=sync --tail=100 -``` +- `technitium_dns_secondary_enabled` — включить второй DNS-инстанс; +- `technitium_dns_sync_enabled` — включить CronJob синхронизации зон; +- `technitium_dns_externaldns_enabled` — включить интеграцию с ExternalDNS; +- `technitium_dns_storage_class` / `technitium_dns_storage_size` — настройки хранения. -Common cause: secondary is not yet ready when the first sync runs. The job will retry on the next schedule. +## Официальные ресурсы -**Secondary shows stale records** - -Force a manual sync (see above). If secondary zone type is wrong, delete the zone on secondary and let sync recreate it. - -**kube-vip IP not assigned** - -Ensure the IP is in the kube-vip address pool (check `kube-vip` ConfigMap or CiliumLoadBalancerIPPool) and not already in use. 
+- Официальный сайт: [https://technitium.com/dns/](https://technitium.com/dns/) +- Официальная документация: [https://github.com/TechnitiumSoftware/DnsServer/wiki](https://github.com/TechnitiumSoftware/DnsServer/wiki) +- Версии Helm chart / ПО: [https://hub.docker.com/r/technitium/dns-server/tags](https://hub.docker.com/r/technitium/dns-server/tags) diff --git a/addons/tempo/README.md b/addons/tempo/README.md index d2ebe95..bfd7f0f 100644 --- a/addons/tempo/README.md +++ b/addons/tempo/README.md @@ -103,3 +103,8 @@ env: - Name: `TraceID` - Regex: `trace_id=(\w+)` - URL: `${__value.raw}` (с datasource Tempo) +## Официальные ресурсы + +- Официальный сайт: [https://grafana.com/oss/tempo/](https://grafana.com/oss/tempo/) +- Официальная документация: [https://grafana.com/docs/tempo/latest/](https://grafana.com/docs/tempo/latest/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/grafana/tempo](https://artifacthub.io/packages/helm/grafana/tempo) diff --git a/addons/vault/README.md b/addons/vault/README.md index eae59f2..8d483b4 100644 --- a/addons/vault/README.md +++ b/addons/vault/README.md @@ -282,3 +282,8 @@ kubectl exec -n vault vault-0 -- vault operator seal # Raft cluster (HA) kubectl exec -n vault vault-0 -- vault operator raft list-peers ``` +## Официальные ресурсы + +- Официальный сайт: [https://www.vaultproject.io/](https://www.vaultproject.io/) +- Официальная документация: [https://developer.hashicorp.com/vault/docs](https://developer.hashicorp.com/vault/docs) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/hashicorp/vault](https://artifacthub.io/packages/helm/hashicorp/vault) diff --git a/addons/vaultwarden/README.md b/addons/vaultwarden/README.md index 7cc9c56..471f974 100644 --- a/addons/vaultwarden/README.md +++ b/addons/vaultwarden/README.md @@ -92,3 +92,8 @@ kubectl cp vaultwarden/:/tmp/backup.sqlite3 ./vaultwarden-backup.sqlite3 kubectl logs -n vaultwarden deployment/vaultwarden -f kubectl get pods -n vaultwarden ``` +## 
Официальные ресурсы + +- Официальный сайт: [https://github.com/dani-garcia/vaultwarden](https://github.com/dani-garcia/vaultwarden) +- Официальная документация: [https://github.com/dani-garcia/vaultwarden/wiki](https://github.com/dani-garcia/vaultwarden/wiki) +- Версии Helm chart / ПО: [https://github.com/dani-garcia/vaultwarden/releases](https://github.com/dani-garcia/vaultwarden/releases) diff --git a/addons/velero/README.md b/addons/velero/README.md index b6a2a64..704a7c8 100644 --- a/addons/velero/README.md +++ b/addons/velero/README.md @@ -123,3 +123,8 @@ kubectl get pods -n velero kubectl logs -n velero deployment/velero -f velero backup describe my-backup --details ``` +## Официальные ресурсы + +- Официальный сайт: [https://velero.io/](https://velero.io/) +- Официальная документация: [https://velero.io/docs/](https://velero.io/docs/) +- Версии Helm chart / ПО: [https://artifacthub.io/packages/helm/vmware-tanzu/velero](https://artifacthub.io/packages/helm/vmware-tanzu/velero) diff --git a/addons/yandex-dns-controller/README.md b/addons/yandex-dns-controller/README.md index 853fc09..773b6e8 100644 --- a/addons/yandex-dns-controller/README.md +++ b/addons/yandex-dns-controller/README.md @@ -218,3 +218,8 @@ kubectl -n yandex-dns-controller edit cm yandex-dns-zones - Не удаляет Это гарантирует что MX-записи Яндекс.Почты, DKIM, SPF и другие критичные записи не будут затронуты. 
+## Официальные ресурсы + +- Официальный сайт: [https://360.yandex.ru/](https://360.yandex.ru/) +- Официальная документация: [https://yandex.ru/dev/api360/doc/ru/ref/DnsService](https://yandex.ru/dev/api360/doc/ru/ref/DnsService) +- Версии Helm chart / ПО: [https://oauth.yandex.ru/](https://oauth.yandex.ru/) diff --git a/docs/addons.md b/docs/addons.md index 08b86af..88adace 100644 --- a/docs/addons.md +++ b/docs/addons.md @@ -55,11 +55,17 @@ make -k molecule-addon-all | longhorn | `addon_longhorn` | Distributed block storage | [→](../addons/longhorn/README.md) | | minio | `addon_minio` | S3-совместимое объектное хранилище | [→](../addons/minio/README.md) | | csi-s3 | `addon_csi_s3` | S3/MinIO как PVC | [→](../addons/csi-s3/README.md) | -| csi-ceph | `addon_csi_ceph` | Rook-Ceph distributed storage | [→](../addons/csi-ceph/README.md) | +| csi-ceph | `addon_csi_ceph` | Kubernetes CSI Ceph (Rook-Ceph, PVC на Ceph) | [→](../addons/csi-ceph/README.md) | +| ceph-rock | `addon_ceph_rock` | Rook-Ceph distributed storage | [→](../addons/ceph-rock/README.md) | | csi-glusterfs | `addon_csi_glusterfs` | GlusterFS CSI driver | [→](../addons/csi-glusterfs/README.md) | | **Базы данных** | | | | | postgresql | `addon_postgresql` | PostgreSQL (shared для аддонов) | [→](../addons/postgresql/README.md) | | mysql | `addon_mysql` | MySQL (Bitnami) | [→](../addons/mysql/README.md) | +| redis | `addon_redis` | Redis (standalone/replication) | [→](../addons/redis/README.md) | +| mongodb | `addon_mongodb` | MongoDB (standalone/replicaset) | [→](../addons/mongodb/README.md) | +| kafka | `addon_kafka` | Apache Kafka (KRaft, standalone/cluster) | [→](../addons/kafka/README.md) | +| kafka-ui | `addon_kafka_ui` | Kafka UI с авторизацией (логин/пароль) | [→](../addons/kafka-ui/README.md) | +| rabbitmq | `addon_rabbitmq` | RabbitMQ (standalone/cluster) | [→](../addons/rabbitmq/README.md) | | databasus | `addon_databasus` | Web UI для бэкапов БД | [→](../addons/databasus/README.md) | | 
**Observability** | | | | | metrics-server | `addon_metrics_server` | kubectl top + HPA | [→](../addons/metrics-server/README.md) | @@ -71,6 +77,7 @@ make -k molecule-addon-all | **CI/CD** | | | | | jenkins | `addon_jenkins` | Jenkins CI/CD + k8s agents | [→](../addons/jenkins/README.md) | | gitea | `addon_gitea` | Git hosting + Gitea Actions | [→](../addons/gitea/README.md) | +| gitlab | `addon_gitlab` | GitLab + GitLab Runner в pod-режиме | [→](../addons/gitlab/README.md) | | argocd | `addon_argocd` | GitOps (ArgoCD) | [→](../addons/argocd/README.md) | | **Безопасность** | | | | | vault | `addon_vault` | HashiCorp Vault | [→](../addons/vault/README.md) | @@ -87,6 +94,7 @@ make -k molecule-addon-all | **Медиасервер** | | | | | mediaserver | `addon_mediaserver` | Plex, Sonarr, Radarr, Lidarr, Bazarr, Prowlarr + Hysteria2 sidecar, Overseerr, Transmission, Samba | [→](../addons/mediaserver/README.md) | | **Сеть / VPN** | | | | +| hysteria2-server | `addon_hysteria2_server` | Hysteria2 сервер для прокси-клиентов и split-туннеля | [→](../addons/hysteria2-server/README.md) | | splitgw | `addon_splitgw` | Прозрачный split-tunnel gateway: sing-box + Hysteria2 TPROXY, YouTube→прокси, RU→прямой | [→](../addons/splitgw/README.md) | | ingress-proxypass | `addon_ingress_proxypass` | Проксировать внешние сервисы (IP:PORT) через ingress-nginx по домену — Service + Endpoints + Ingress | [→](../addons/ingress-proxypass/README.md) | | ingress-add-domains | `addon_ingress_add_domains` | Добавить домены к существующим K8s сервисам — только Ingress, без Service/Endpoints | [→](../addons/ingress-add-domains/README.md) | @@ -120,9 +128,15 @@ addon_csi_s3: false # S3 как PVC # ── Базы данных ─────────────────────────────────────────────────────────────── addon_postgresql: false # shared PostgreSQL addon_mysql: false +addon_redis: false +addon_mongodb: false +addon_kafka: false +addon_kafka_ui: false +addon_rabbitmq: false # ── CI/CD 
───────────────────────────────────────────────────────────────────── addon_gitea: false +addon_gitlab: false addon_jenkins: false addon_argocd: false @@ -164,9 +178,11 @@ addon_authelia: false # SSO Forward-auth + OIDC (Gitea/Grafana/ | `loki` | `minio` (опционально) | S3 backend при addon_minio: true | | `velero` | `minio` | S3 backend для бэкапов | | `csi-s3` | `minio` (опционально) | Endpoint берётся автоматически | -| `gitea` | `postgresql` (опционально) | Своя БД при addon_postgresql: true | -| `harbor` | `postgresql` (опционально) | Своя БД при addon_postgresql: true | -| `nextcloud` | `postgresql` (опционально) | Своя БД при addon_postgresql: true | +| `gitea` | `postgresql` (опционально) | `gitea_database_mode`: `auto`/`internal`/`external_postgresql` | +| `gitlab` | `postgresql` (опционально) | `gitlab_database_mode`: `auto`/`internal`/`external_postgresql` | +| `harbor` | `postgresql` (опционально) | `harbor_database_mode`: `auto`/`internal`/`external_postgresql` | +| `nextcloud` | `postgresql` | `nextcloud_database_mode`: `external_postgresql` по умолчанию | +| `kafka-ui` | `kafka` (рекомендуется) | `kafka_ui_bootstrap_servers` указывает на кластер Kafka | | `databasus` | `postgresql`, `mysql`, `minio` | Все подключения автоматические | | `jenkins` | `vault` (опционально) | JCasC Vault URL при addon_vault: true | | `external-secrets` | `vault` | ClusterSecretStore к Vault | @@ -177,7 +193,24 @@ addon_authelia: false # SSO Forward-auth + OIDC (Gitea/Grafana/ | `ingress-proxypass` | `ingress-nginx` | Требует работающий Ingress controller | | `ingress-add-domains` | `ingress-nginx` | Требует работающий Ingress controller | | `technitium-dns` | kube-vip | LoadBalancer IP через kube-vip аннотацию | -| `authelia` | `ingress-nginx` | Forward-auth через annotations; PostgreSQL/Redis — опционально | +| `authelia` | `ingress-nginx`, `redis` (опционально) | `authelia_redis_mode`: `auto`/`internal`/`external_redis`/`disabled` | +| `argocd` | `redis` (опционально) | 
`argocd_redis_mode`: `auto`/`internal`/`external_redis` | + +## Явные переключатели встроенных/внешних сервисов + +Новые аддоны и интеграции поддерживают явный выбор: использовать нативный компонент чарта или внешний addon-сервис. + +```yaml +# PostgreSQL для приложений +gitea_database_mode: "auto" # auto | internal | external_postgresql +gitlab_database_mode: "auto" # auto | internal | external_postgresql +harbor_database_mode: "auto" # auto | internal | external_postgresql +nextcloud_database_mode: "external_postgresql" # auto | sqlite | external_postgresql + +# Redis для auth/CI +authelia_redis_mode: "auto" # auto | internal | external_redis | disabled +argocd_redis_mode: "auto" # auto | internal | external_redis +``` ## MediaServer @@ -344,12 +377,21 @@ vault_grafana_password: "" vault_kiali_token: "" vault_gitea_admin_password: "" vault_gitea_db_password: "" +vault_gitlab_admin_password: "" +vault_gitlab_db_password: "" vault_harbor_admin_password: "Harbor12345" vault_harbor_db_password: "" vault_postgresql_postgres_password: "" vault_postgresql_password: "" vault_mysql_root_password: "" vault_mysql_password: "" +vault_redis_password: "" +vault_mongodb_root_password: "" +vault_mongodb_password: "" +vault_kafka_client_password: "" +vault_kafka_ui_password: "" +vault_rabbitmq_password: "" +vault_rabbitmq_erlang_cookie: "" vault_minio_root_user: "admin" vault_minio_root_password: "" vault_velero_s3_access_key: "" diff --git a/docs/cicd.md b/docs/cicd.md index 57e167e..931b874 100644 --- a/docs/cicd.md +++ b/docs/cicd.md @@ -1,6 +1,6 @@ # CI/CD -Инструменты непрерывной интеграции и доставки: Jenkins, Gitea Actions, ArgoCD. +Инструменты непрерывной интеграции и доставки: Jenkins, Gitea Actions, GitLab CI/CD, ArgoCD. 
## Jenkins @@ -212,6 +212,32 @@ argocd_ingress_host: "argocd.example.com" make addon-argocd ``` +### Redis режим для ArgoCD + +```yaml +argocd_redis_mode: "auto" # auto | internal | external_redis +``` + +Если в кластере уже установлен `addon_redis: true`, режим `auto` подключит внешний Redis. + +--- + +## GitLab + Runner + +GitLab Community Edition с GitLab Runner в pod-режиме. Подробнее: [addons/gitlab/README.md](../addons/gitlab/README.md). + +```yaml +addon_gitlab: true +gitlab_ingress_host: "gitlab.example.com" +gitlab_runner_install: true +gitlab_runner_replicas: 2 +gitlab_database_mode: "auto" # auto | internal | external_postgresql +``` + +```bash +make addon-gitlab +``` + ### Application manifest ```yaml diff --git a/docs/configuration.md b/docs/configuration.md index 6b355e4..f397dfe 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -131,6 +131,20 @@ annotations: cert-manager.io/cluster-issuer: "letsencrypt-prod" ``` +## Режимы встроенных/внешних БД и Redis + +```yaml +# Приложения с PostgreSQL +gitea_database_mode: "auto" # auto | internal | external_postgresql +gitlab_database_mode: "auto" # auto | internal | external_postgresql +harbor_database_mode: "auto" # auto | internal | external_postgresql +nextcloud_database_mode: "external_postgresql" # auto | sqlite | external_postgresql + +# Приложения с Redis +authelia_redis_mode: "auto" # auto | internal | external_redis | disabled +argocd_redis_mode: "auto" # auto | internal | external_redis +``` + ## Ansible Vault ```bash diff --git a/docs/security.md b/docs/security.md index a3f1e4f..fcc264a 100644 --- a/docs/security.md +++ b/docs/security.md @@ -74,6 +74,104 @@ annotations: Секрет будет доступен в поде как `/vault/secrets/config.env`. 
+### Примеры: как подключать env в манифесты из HashiCorp Vault + +#### Вариант 1 — Vault Agent Injector + `source /vault/secrets/*.env` + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-with-injector + namespace: my-app +spec: + replicas: 1 + selector: + matchLabels: + app: app-with-injector + template: + metadata: + labels: + app: app-with-injector + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "my-app" + vault.hashicorp.com/agent-inject-secret-app.env: "secret/data/myapp/config" + vault.hashicorp.com/agent-inject-template-app.env: | + {{- with secret "secret/data/myapp/config" -}} + DB_PASSWORD={{ .Data.data.db_password }} + API_KEY={{ .Data.data.api_key }} + {{- end }} + spec: + serviceAccountName: my-app + containers: + - name: app + image: ghcr.io/example/app:latest + command: ["/bin/sh", "-c"] + args: + - | + set -a + . /vault/secrets/app.env + set +a + exec /app/start +``` + +#### Вариант 2 — Vault → ExternalSecret → `envFrom.secretRef` + +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: app-env + namespace: my-app +spec: + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + target: + name: app-env + data: + - secretKey: DB_PASSWORD + remoteRef: + key: secret/myapp + property: db_password + - secretKey: API_KEY + remoteRef: + key: secret/myapp + property: api_key +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: app-with-envfrom + namespace: my-app +spec: + template: + spec: + containers: + - name: app + image: ghcr.io/example/app:latest + envFrom: + - secretRef: + name: app-env +``` + +#### Вариант 3 — отдельные env-переменные через `secretKeyRef` + +```yaml +env: + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: app-env + key: DB_PASSWORD + - name: API_KEY + valueFrom: + secretKeyRef: + name: app-env + key: API_KEY +``` + ### Kubernetes Auth Method ```bash diff --git a/docs/storage.md b/docs/storage.md index 
174809b..4b3a915 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -102,6 +102,8 @@ spec: Block (RWO) + Filesystem (RWX) storage через Ceph. Требует минимум 3 ноды с незанятыми дисками. ```yaml +addon_ceph_rock: true +# или legacy-алиас: addon_csi_ceph: true rook_ceph_mon_count: 3 rook_ceph_block_replica_count: 3 @@ -111,7 +113,7 @@ StorageClasses: - `rook-ceph-block` — ReadWriteOnce (БД) - `rook-ceph-filesystem` — ReadWriteMany (общие файлы) -Подробнее: [addons/csi-ceph/README.md](../addons/csi-ceph/README.md). +Подробнее: [addons/ceph-rock/README.md](../addons/ceph-rock/README.md), [addons/csi-ceph/README.md](../addons/csi-ceph/README.md). --- diff --git a/group_vars/all/addons.yml b/group_vars/all/addons.yml index 1dd3690..0de3718 100644 --- a/group_vars/all/addons.yml +++ b/group_vars/all/addons.yml @@ -19,6 +19,11 @@ addon_prometheus_stack: false addon_istio: false # ArgoCD GitOps addon_argocd: false +# Настройки Redis для ArgoCD: +# argocd_redis_mode: "auto" # auto | internal | external_redis +# argocd_redis_host: "redis-master.redis.svc.cluster.local" +# argocd_redis_port: 6379 +# argocd_redis_password: "{{ vault_redis_password }}" # Longhorn: реплицируемый блочный диск addon_longhorn: false # Web UI кластера (kubernetes-dashboard) @@ -27,6 +32,18 @@ addon_kubernetes_dashboard: false addon_postgresql: false # Bitnami MySQL addon_mysql: false +# GitLab + GitLab Runner в Kubernetes +addon_gitlab: false +# Redis (standalone/replication) +addon_redis: false +# MongoDB (standalone/replicaset) +addon_mongodb: false +# Kafka (standalone/cluster, KRaft) +addon_kafka: false +# Kafka UI (web-интерфейс Kafka) +addon_kafka_ui: false +# RabbitMQ (standalone/cluster) +addon_rabbitmq: false # Databasus: веб-управление бэкапами БД addon_databasus: false # MinIO S3 @@ -53,7 +70,9 @@ addon_tempo: false addon_pushgateway: false # CSI: монтирование S3 как PVC; с MinIO — авто-настройка endpoint addon_csi_s3: false -# Rook-Ceph: блочное RWO и CephFS RWX +# Rook-Ceph (новый флаг): блочное RWO и CephFS RWX, ceph-csi
драйверы RBD/CephFS для PVC +addon_ceph_rock: false +# Старый флаг csi-ceph (основной для ceph-csi драйвера) addon_csi_ceph: false # CSI GlusterFS: нужен Heketi/Gluster снаружи addon_csi_glusterfs: false @@ -232,6 +251,94 @@ mysql_external_host: "mysql.mysql.svc.cluster.local" # Порт SQL mysql_external_port: 3306 +# ─── GitLab + GitLab Runner ─────────────────────────────────────────────────── +# GitLab ставится Helm-чартом, runner включается как поды в кластере. +# При addon_postgresql: true — используется внешний PostgreSQL из addon_postgresql. +# gitlab_version: "" # "" = последняя версия чарта +# gitlab_namespace: "gitlab" +# gitlab_ingress_enabled: true +# gitlab_ingress_host: "gitlab.example.com" +# gitlab_ingress_class: "nginx" +# gitlab_ingress_tls: false +# gitlab_domain: "example.com" # базовый домен для GitLab chart +# gitlab_admin_password: "{{ vault_gitlab_admin_password }}" +# gitlab_storage_class: "" # "" = default StorageClass +# gitlab_gitaly_storage_size: "20Gi" +# gitlab_runner_install: true +# gitlab_runner_replicas: 2 +# gitlab_runner_concurrent: 10 +# gitlab_database_mode: "auto" # auto | internal | external_postgresql +# gitlab_db_name: "gitlabhq_production" +# gitlab_db_username: "gitlab" +# gitlab_db_password: "{{ vault_gitlab_db_password }}" # для внешнего PostgreSQL + +# ─── Redis ───────────────────────────────────────────────────────────────────── +# Bitnami Redis. architecture: standalone | replication +# redis_version: "19.6.4" +# redis_namespace: "redis" +# redis_architecture: "standalone" +# redis_auth_enabled: true +# redis_auth_password: "{{ vault_redis_password }}" +# redis_storage_class: "" +# redis_storage_size: "8Gi" +# redis_replica_count: 2 # используется при architecture=replication +# redis_metrics_enabled: true + +# ─── MongoDB ─────────────────────────────────────────────────────────────────── +# Bitnami MongoDB. 
architecture: standalone | replicaset +# mongodb_version: "15.6.21" +# mongodb_namespace: "mongodb" +# mongodb_architecture: "standalone" +# mongodb_auth_enabled: true +# mongodb_root_user: "root" +# mongodb_root_password: "{{ vault_mongodb_root_password }}" +# mongodb_username: "appuser" +# mongodb_password: "{{ vault_mongodb_password }}" +# mongodb_database: "appdb" +# mongodb_storage_class: "" +# mongodb_storage_size: "8Gi" +# mongodb_replica_count: 3 # используется при architecture=replicaset +# mongodb_metrics_enabled: true + +# ─── Kafka (KRaft) ───────────────────────────────────────────────────────────── +# Bitnami Kafka без ZooKeeper (KRaft). mode: standalone | cluster +# kafka_version: "30.1.8" +# kafka_namespace: "kafka" +# kafka_mode: "standalone" +# kafka_storage_class: "" +# kafka_storage_size: "20Gi" +# kafka_auth_enabled: false +# kafka_client_passwords: ["{{ vault_kafka_client_password }}"] +# kafka_controller_replica_count: 3 # используется в mode=cluster +# kafka_broker_replica_count: 3 # используется в mode=cluster +# kafka_metrics_enabled: true + +# ─── Kafka UI ────────────────────────────────────────────────────────────────── +# Web UI для просмотра топиков, consumer groups и сообщений Kafka. +# kafka_ui_version: "0.7.6" +# kafka_ui_namespace: "kafka-ui" +# kafka_ui_ingress_enabled: true +# kafka_ui_ingress_host: "kafka-ui.example.com" +# kafka_ui_ingress_class: "nginx" +# kafka_ui_ingress_tls: false +# kafka_ui_cluster_name: "k3s-kafka" +# kafka_ui_bootstrap_servers: "kafka.kafka.svc.cluster.local:9092" +# kafka_ui_auth_username: "admin" +# kafka_ui_auth_password: "{{ vault_kafka_ui_password }}" + +# ─── RabbitMQ ────────────────────────────────────────────────────────────────── +# Bitnami RabbitMQ. 
mode: standalone | cluster +# rabbitmq_version: "15.5.1" +# rabbitmq_namespace: "rabbitmq" +# rabbitmq_mode: "standalone" +# rabbitmq_auth_username: "appuser" +# rabbitmq_auth_password: "{{ vault_rabbitmq_password }}" +# rabbitmq_erlang_cookie: "{{ vault_rabbitmq_erlang_cookie }}" +# rabbitmq_storage_class: "" +# rabbitmq_storage_size: "8Gi" +# rabbitmq_replica_count: 3 # используется в mode=cluster +# rabbitmq_metrics_enabled: true + # ─── Databasus ──────────────────────────────────────────────────────────────── # Databasus автоматически получает подключение к PostgreSQL/MySQL # если соответствующий аддон включён (addon_postgresql/addon_mysql: true). @@ -280,7 +387,7 @@ minio_api_ingress_host: "s3.example.com" # vault_harbor_db_password: "..." # используется только при addon_postgresql: true # harbor_ingress_host: "harbor.example.com" # harbor_registry_storage_size: "20Gi" -# harbor_database_type: "internal" # внешняя PostgreSQL при addon_postgresql: true +# harbor_database_mode: "auto" # auto | internal | external_postgresql # ─── Gitea ──────────────────────────────────────────────────────────────────── # Пароли задаются в vault.yml: @@ -289,6 +396,7 @@ minio_api_ingress_host: "s3.example.com" # gitea_ingress_host: "gitea.example.com" # gitea_version: "" # "" = автопоиск последней версии # gitea_ssh_enabled: false # NodePort SSH для git clone +# gitea_database_mode: "auto" # auto | internal | external_postgresql # ─── ownCloud (OCIS) ────────────────────────────────────────────────────────── # Пароль задаётся в vault.yml: @@ -304,6 +412,7 @@ minio_api_ingress_host: "s3.example.com" # nextcloud_ingress_host: "nextcloud.example.com" # nextcloud_version: "" # "" = автопоиск последней версии # nextcloud_storage_size: "20Gi" +# nextcloud_database_mode: "external_postgresql" # auto | sqlite | external_postgresql # ─── Loki ───────────────────────────────────────────────────────────────────── # loki_storage_type: "filesystem" # filesystem (авто: s3 если addon_minio: 
true) @@ -338,15 +447,29 @@ minio_api_ingress_host: "s3.example.com" # csi_s3_access_key: "" # авто из vault_minio_root_user # csi_s3_secret_key: "" # авто из vault_minio_root_password -# ─── CSI Ceph / Rook-Ceph ───────────────────────────────────────────────────── -# Distributed block (RWO) и filesystem (RWX) storage на базе Ceph. -# Требует минимум 3 ноды с незанятыми дисками для Ceph OSD. -# rook_ceph_mon_count: 3 -# rook_ceph_block_replica_count: 3 # для single-node задай 1 -# rook_ceph_block_storage_class: "rook-ceph-block" -# rook_ceph_filesystem_storage_class: "rook-ceph-filesystem" -# rook_ceph_dashboard_ingress_enabled: false -# rook_ceph_dashboard_ingress_host: "ceph.example.com" +# ─── CSI Ceph (ceph-csi: RBD + CephFS) ─────────────────────────────────────── +# Требует уже существующий Ceph-кластер (FSID, MON, user/key). +csi_ceph_driver_ref: "devel" # ветка/тег ceph/ceph-csi (devel|v3.x.x) +csi_ceph_namespace: "kube-system" +csi_ceph_cluster_id: "b9127830-b0cc-4e34-aa47-9d1a2e9949a8" # FSID (ceph fsid) +csi_ceph_monitors: + - "10.0.0.11:6789" + - "10.0.0.12:6789" + - "10.0.0.13:6789" +csi_ceph_user_id: "kubernetes" +csi_ceph_user_key: "{{ vault_csi_ceph_user_key }}" +csi_ceph_rbd_storage_class_name: "ceph-rbd" +csi_ceph_rbd_pool: "rbd" +csi_ceph_rbd_fs_type: "ext4" +csi_ceph_rbd_storage_class_default: false +csi_ceph_cephfs_storage_class_name: "cephfs" +csi_ceph_cephfs_fs_name: "cephfs" +csi_ceph_cephfs_pool: "cephfs_data" +csi_ceph_cephfs_storage_class_default: false +csi_ceph_reclaim_policy: "Delete" # Delete | Retain +csi_ceph_volume_binding_mode: "Immediate" # Immediate | WaitForFirstConsumer +csi_ceph_enable_rbd: true +csi_ceph_enable_cephfs: true # ─── CSI GlusterFS ──────────────────────────────────────────────────────────── # Требует внешний GlusterFS кластер + Heketi REST API. 
@@ -461,7 +584,9 @@ minio_api_ingress_host: "s3.example.com" # authelia_domain: "home.local" # базовый домен (session cookie domain) # authelia_two_factor_enabled: false # включить 2FA для защищённых сервисов # authelia_storage_type: "sqlite" # sqlite | postgresql -# authelia_redis_enabled: false # Redis для хранения сессий +# authelia_redis_mode: "auto" # auto | internal | external_redis | disabled +# authelia_redis_host: "redis-master.redis.svc.cluster.local" +# authelia_redis_port: 6379 # authelia_smtp_enabled: false # SMTP для сброса пароля и 2FA email # Домены с защитой (forward-auth): # authelia_protected_domains: [sonarr.home.local, radarr.home.local, ...] diff --git a/group_vars/all/vault.yml.example b/group_vars/all/vault.yml.example index b78078a..83c882e 100644 --- a/group_vars/all/vault.yml.example +++ b/group_vars/all/vault.yml.example @@ -69,6 +69,34 @@ vault_gitea_admin_password: "changeme-gitea" # Пароль пользователя БД gitea при внешнем PostgreSQL vault_gitea_db_password: "changeme-gitea-db" +# ─── GitLab ──────────────────────────────────────────────────────────────────── +# Пароль пользователя root в GitLab +vault_gitlab_admin_password: "changeme-gitlab" +# Пароль пользователя БД gitlab при внешнем PostgreSQL +vault_gitlab_db_password: "changeme-gitlab-db" + +# ─── Redis ───────────────────────────────────────────────────────────────────── +# Пароль Redis (если redis_auth_enabled: true) +vault_redis_password: "changeme-redis" + +# ─── MongoDB ─────────────────────────────────────────────────────────────────── +# Пароль root-пользователя MongoDB +vault_mongodb_root_password: "changeme-mongodb-root" +# Пароль прикладного пользователя MongoDB +vault_mongodb_password: "changeme-mongodb-app" + +# ─── Kafka ───────────────────────────────────────────────────────────────────── +# Пароль клиентского пользователя Kafka (SASL/SCRAM) +vault_kafka_client_password: "changeme-kafka-client" +# Пароль для входа в Kafka UI +vault_kafka_ui_password: 
"changeme-kafka-ui" + +# ─── RabbitMQ ────────────────────────────────────────────────────────────────── +# Пароль пользователя RabbitMQ +vault_rabbitmq_password: "changeme-rabbitmq" +# Erlang cookie для кластеризации RabbitMQ +vault_rabbitmq_erlang_cookie: "changeme-rabbitmq-cookie" + # ─── ownCloud OCIS ───────────────────────────────────────────────────────────── # Пароль первого администратора ownCloud vault_owncloud_admin_password: "changeme-owncloud" @@ -111,6 +139,10 @@ vault_azure_kv_client_secret: "" # Secret ID роли eso-role (см. addons/external-secrets/README.md) vault_eso_approle_secret_id: "" +# ─── CSI Ceph (ceph-csi) ────────────────────────────────────────────────────── +# CephX key пользователя client. (например client.kubernetes) +vault_csi_ceph_user_key: "" + # ─── Jenkins ─────────────────────────────────────────────────────────────────── # Пароль админа Jenkins (JCasC) vault_jenkins_admin_password: "changeme-jenkins" diff --git a/playbooks/addons.yml b/playbooks/addons.yml index 89725d5..24cb204 100644 --- a/playbooks/addons.yml +++ b/playbooks/addons.yml @@ -104,6 +104,54 @@ roles: - role: "{{ playbook_dir }}/../addons/mysql/role" +- name: Install Redis + hosts: k3s_master[0] + gather_facts: false + become: true + when: addon_redis | default(false) | bool + roles: + - role: "{{ playbook_dir }}/../addons/redis/role" + +- name: Install MongoDB + hosts: k3s_master[0] + gather_facts: false + become: true + when: addon_mongodb | default(false) | bool + roles: + - role: "{{ playbook_dir }}/../addons/mongodb/role" + +- name: Install Kafka + hosts: k3s_master[0] + gather_facts: false + become: true + when: addon_kafka | default(false) | bool + roles: + - role: "{{ playbook_dir }}/../addons/kafka/role" + +- name: Install Kafka UI + hosts: k3s_master[0] + gather_facts: false + become: true + when: addon_kafka_ui | default(false) | bool + roles: + - role: "{{ playbook_dir }}/../addons/kafka-ui/role" + +- name: Install RabbitMQ + hosts: k3s_master[0] + 
gather_facts: false + become: true + when: addon_rabbitmq | default(false) | bool + roles: + - role: "{{ playbook_dir }}/../addons/rabbitmq/role" + +- name: Install GitLab + hosts: k3s_master[0] + gather_facts: false + become: true + when: addon_gitlab | default(false) | bool + roles: + - role: "{{ playbook_dir }}/../addons/gitlab/role" + - name: Install Databasus hosts: k3s_master[0] gather_facts: false @@ -208,11 +256,11 @@ roles: - role: "{{ playbook_dir }}/../addons/csi-s3/role" -- name: Install Rook-Ceph +- name: Install Ceph-Rook hosts: k3s_master[0] gather_facts: false become: true - when: addon_csi_ceph | default(false) | bool + when: (addon_ceph_rock | default(false) | bool) or (addon_csi_ceph | default(false) | bool) roles: - role: "{{ playbook_dir }}/../addons/csi-ceph/role"