feat: add CSI-S3, CSI-Ceph, CSI-GlusterFS and Vaultwarden addons

- CSI-S3 (ctrox/csi-s3): mounts S3/MinIO buckets as PVCs, auto-integrates with addon_minio via the internal MinIO endpoint
- Rook-Ceph (csi-ceph): distributed block (RWO) and filesystem (RWX) storage; Helm operator + CephCluster CRD + StorageClasses, optional Dashboard Ingress
- CSI GlusterFS: installs glusterfs-client on all nodes, CSI driver from GitHub releases, StorageClass with the Heketi provisioner, Endpoints for direct mounts
- Vaultwarden (guerzon/vaultwarden): self-hosted Bitwarden, automatic chart version, SMTP smtp.yandex.ru:465/force_tls, WebSocket, ingress TLS, ServiceMonitor

Updated: playbooks/addons.yml (8 previously missing addons + 4 new ones), group_vars/all/addons.yml (flags + configuration comments), vault.yml.example (vaultwarden_admin_token, smtp_password, heketi_secret), Makefile (PHONY + 4 new targets)
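Enabling the new addons comes down to flipping the corresponding flags in group_vars/all/addons.yml; the flag names below are the ones introduced by this commit, the values are only an illustration:

addon_csi_s3: true          # S3/MinIO buckets as PVCs
addon_csi_ceph: false       # Rook-Ceph block/filesystem storage
addon_csi_glusterfs: false  # requires an external GlusterFS + Heketi
addon_vaultwarden: true     # self-hosted Bitwarden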
Makefile (17 lines changed)
@@ -56,6 +56,7 @@ DOCKER_RUN := docker run --rm -it \
	addon-minio addon-velero addon-crowdsec \
	addon-loki addon-promtail addon-tempo addon-pushgateway \
	addon-harbor addon-gitea addon-owncloud addon-nextcloud \
	addon-csi-s3 addon-csi-ceph addon-csi-glusterfs addon-vaultwarden \
	add-node remove-node \
	add-etcd-node remove-etcd-node \
	etcd-backup etcd-restore etcd-list-snapshots \
@@ -369,6 +370,22 @@ addon-pushgateway: _check_env _check_image ## Install Prometheus Pu
	@printf "$(CYAN)Installing Pushgateway...$(NC)\n"
	$(DOCKER_RUN) addon pushgateway $(ARGS)

addon-csi-s3: _check_env _check_image ## Install CSI S3 Driver - mount S3/MinIO buckets as PVCs (auto-MinIO when addon_minio: true)
	@printf "$(CYAN)Installing CSI S3 Driver...$(NC)\n"
	$(DOCKER_RUN) addon csi-s3 $(ARGS)

addon-csi-ceph: _check_env _check_image ## Install Rook-Ceph - distributed block (RWO) + filesystem (RWX) storage
	@printf "$(CYAN)Installing Rook-Ceph...$(NC)\n"
	$(DOCKER_RUN) addon csi-ceph $(ARGS)

addon-csi-glusterfs: _check_env _check_image ## Install CSI GlusterFS Driver (requires an external GlusterFS + Heketi, ARGS="-e csi_glusterfs_heketi_url=...")
	@printf "$(CYAN)Installing CSI GlusterFS Driver...$(NC)\n"
	$(DOCKER_RUN) addon csi-glusterfs $(ARGS)

addon-vaultwarden: _check_env _check_image ## Install Vaultwarden - self-hosted Bitwarden (ARGS="-e vaultwarden_ingress_host=vault.example.com")
	@printf "$(CYAN)Installing Vaultwarden...$(NC)\n"
	$(DOCKER_RUN) addon vaultwarden $(ARGS)

# Generic target - any addon from addons/<name>/playbook.yml
addon-%: _check_env _check_image
	@if [ ! -f "addons/$*/playbook.yml" ]; then \
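Usage note: with these targets the addons can be installed one at a time, for example make addon-vaultwarden ARGS="-e vaultwarden_ingress_host=vault.example.com", or make addon-csi-glusterfs ARGS="-e csi_glusterfs_heketi_url=http://heketi:8080"; the ARGS values shown here are the ones suggested in the help strings above.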
addons/csi-ceph/playbook.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
- name: Install Csi Ceph
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
addons/csi-ceph/role/defaults/main.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
---
rook_ceph_version: "1.14.0"
rook_ceph_namespace: "rook-ceph"
rook_ceph_chart_repo: "https://charts.rook.io/release"

# Ceph image version
rook_ceph_image: "quay.io/ceph/ceph:v18.2.2"

# Monitors (MON): for single-node use count=1, allowMultiplePerNode=true
rook_ceph_mon_count: 3
rook_ceph_allow_multiple_mon_per_node: false

# Host path for Ceph data (OSDs in filestore mode)
# To use raw block devices, leave this empty and set rook_ceph_devices
rook_ceph_data_dir: "/var/lib/rook"

# Raw block devices for the OSDs (e.g. ["/dev/sdb", "/dev/sdc"])
# If empty, Ceph uses the rook_ceph_data_dir directory
rook_ceph_devices: []

# Automatically use all available (unmounted) disks
rook_ceph_use_all_devices: false

# Dashboard
rook_ceph_dashboard_enabled: true

# StorageClasses
rook_ceph_block_pool_name: "replicapool"
rook_ceph_block_replica_count: 3  # reduce to 1 for single-node
rook_ceph_block_storage_class: "rook-ceph-block"
rook_ceph_block_storage_class_default: false

rook_ceph_filesystem_name: "ceph-filesystem"
rook_ceph_filesystem_storage_class: "rook-ceph-filesystem"

# Ingress for the Ceph Dashboard
rook_ceph_dashboard_ingress_enabled: false
rook_ceph_dashboard_ingress_host: "ceph.local"
rook_ceph_dashboard_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
rook_ceph_dashboard_ingress_tls: false
rook_ceph_dashboard_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"

# Metrics
rook_ceph_metrics_enabled: true
# The ServiceMonitor is created only when addon_prometheus_stack: true
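Based on the comments in these defaults, a single-node profile would likely override the values along these lines (a sketch of group_vars overrides, not something shipped in this commit):

# single-node overrides (sketch)
rook_ceph_mon_count: 1
rook_ceph_allow_multiple_mon_per_node: true
rook_ceph_block_replica_count: 1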
addons/csi-ceph/role/tasks/main.yml (new file, 98 lines)
@@ -0,0 +1,98 @@
---
- name: Add Rook Helm repo
  kubernetes.core.helm_repository:
    name: rook-release
    repo_url: "{{ rook_ceph_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install Rook-Ceph operator via Helm
  kubernetes.core.helm:
    name: rook-ceph
    chart_ref: rook-release/rook-ceph
    chart_version: "{{ rook_ceph_version }}"
    release_namespace: "{{ rook_ceph_namespace }}"
    create_namespace: true
    wait: true
    timeout: "10m0s"
    values:
      tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Exists"
          effect: "NoSchedule"
      discover:
        tolerations:
          - operator: "Exists"
      csi:
        provisionerTolerations:
          - key: "node-role.kubernetes.io/control-plane"
            operator: "Exists"
            effect: "NoSchedule"
        pluginTolerations:
          - operator: "Exists"
      monitoring:
        enabled: "{{ rook_ceph_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Wait for Rook operator to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ rook_ceph_namespace }}
    rollout status deployment/rook-ceph-operator --timeout=180s
  changed_when: false
  retries: 3
  delay: 10

- name: Template CephCluster + StorageClasses manifest
  ansible.builtin.template:
    src: ceph-cluster.yaml.j2
    dest: /tmp/ceph-cluster.yaml
    mode: '0644'

- name: Apply CephCluster + StorageClasses
  ansible.builtin.command: k3s kubectl apply -f /tmp/ceph-cluster.yaml
  changed_when: true

- name: Wait for Ceph monitors to be ready (this can take several minutes)
  ansible.builtin.command: >
    k3s kubectl -n {{ rook_ceph_namespace }}
    wait cephcluster/rook-ceph
    --for=jsonpath='{.status.phase}'=Ready
    --timeout=600s
  changed_when: false
  retries: 5
  delay: 30
  failed_when: false

- name: Create Ceph Dashboard Ingress
  ansible.builtin.template:
    src: ceph-dashboard-ingress.yaml.j2
    dest: /tmp/ceph-dashboard-ingress.yaml
    mode: '0644'
  when: rook_ceph_dashboard_ingress_enabled | bool

- name: Apply Ceph Dashboard Ingress
  ansible.builtin.command: k3s kubectl apply -f /tmp/ceph-dashboard-ingress.yaml
  changed_when: true
  when: rook_ceph_dashboard_ingress_enabled | bool

- name: Get Ceph Dashboard admin password
  ansible.builtin.command: >
    k3s kubectl -n {{ rook_ceph_namespace }}
    get secret rook-ceph-dashboard-password
    -o jsonpath='{.data.password}'
  register: _ceph_dashboard_password
  changed_when: false
  failed_when: false

- name: Show Rook-Ceph access info
  ansible.builtin.debug:
    msg:
      - "Rook-Ceph installed in namespace: {{ rook_ceph_namespace }}"
      - "StorageClass (block RWO): {{ rook_ceph_block_storage_class }}"
      - "StorageClass (filesystem RWX): {{ rook_ceph_filesystem_storage_class }}"
      - "Replicas: {{ rook_ceph_block_replica_count }} (set rook_ceph_block_replica_count=1 for single-node)"
      - "{% if rook_ceph_dashboard_ingress_enabled %}Dashboard: http{{ 's' if rook_ceph_dashboard_ingress_tls else '' }}://{{ rook_ceph_dashboard_ingress_host }}{% else %}Dashboard: kubectl port-forward svc/rook-ceph-mgr-dashboard -n {{ rook_ceph_namespace }} 7000:7000{% endif %}"
      - "Dashboard login: admin / {{ _ceph_dashboard_password.stdout | b64decode if _ceph_dashboard_password.rc == 0 else '(still being created)' }}"
      - "Cluster status: kubectl -n {{ rook_ceph_namespace }} get cephcluster"
      - "Toolbox: kubectl -n {{ rook_ceph_namespace }} exec -it deploy/rook-ceph-tools -- bash"
addons/csi-ceph/role/templates/ceph-cluster.yaml.j2 (new file, 127 lines)
@@ -0,0 +1,127 @@
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: {{ rook_ceph_namespace }}
spec:
  cephVersion:
    image: "{{ rook_ceph_image }}"
    allowUnsupported: false

  dataDirHostPath: "{{ rook_ceph_data_dir }}"

  mon:
    count: {{ rook_ceph_mon_count }}
    allowMultiplePerNode: {{ rook_ceph_allow_multiple_mon_per_node | lower }}

  mgr:
    count: 1
    modules:
      - name: pg_autoscaler
        enabled: true

  dashboard:
    enabled: {{ rook_ceph_dashboard_enabled | lower }}
    ssl: false

  monitoring:
    enabled: {{ (rook_ceph_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool) | lower }}

  network:
    connections:
      encryption:
        enabled: false
      compression:
        enabled: false

  storage:
    useAllNodes: true
    useAllDevices: {{ rook_ceph_use_all_devices | lower }}
{% if rook_ceph_devices %}
    devices:
{% for dev in rook_ceph_devices %}
      - name: "{{ dev }}"
{% endfor %}
{% else %}
    directories:
      - path: "{{ rook_ceph_data_dir }}"
{% endif %}

  placement:
    all:
      tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Exists"
          effect: "NoSchedule"

  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: {{ rook_ceph_block_pool_name }}
  namespace: {{ rook_ceph_namespace }}
spec:
  failureDomain: host
  replicated:
    size: {{ rook_ceph_block_replica_count }}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ rook_ceph_block_storage_class }}
  annotations:
    storageclass.kubernetes.io/is-default-class: "{{ rook_ceph_block_storage_class_default | lower }}"
provisioner: {{ rook_ceph_namespace }}.rbd.csi.ceph.com
parameters:
  clusterID: {{ rook_ceph_namespace }}
  pool: {{ rook_ceph_block_pool_name }}
  imageFormat: "2"
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_ceph_namespace }}
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: {{ rook_ceph_namespace }}
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_ceph_namespace }}
reclaimPolicy: Delete
allowVolumeExpansion: true
---
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: {{ rook_ceph_filesystem_name }}
  namespace: {{ rook_ceph_namespace }}
spec:
  metadataPool:
    replicated:
      size: {{ rook_ceph_block_replica_count }}
  dataPools:
    - name: data0
      replicated:
        size: {{ rook_ceph_block_replica_count }}
  preserveFilesystemOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ rook_ceph_filesystem_storage_class }}
provisioner: {{ rook_ceph_namespace }}.cephfs.csi.ceph.com
parameters:
  clusterID: {{ rook_ceph_namespace }}
  fsName: {{ rook_ceph_filesystem_name }}
  pool: {{ rook_ceph_filesystem_name }}-data0
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: {{ rook_ceph_namespace }}
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: {{ rook_ceph_namespace }}
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: {{ rook_ceph_namespace }}
reclaimPolicy: Delete
allowVolumeExpansion: true
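For illustration, PVCs consuming the two StorageClasses defined in this template might look like the sketch below; the class names assume the role defaults are left unchanged:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-rwo
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: rook-ceph-block        # block-pool class from this template
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: shared-rwx
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: rook-ceph-filesystem   # CephFS class from this template
  resources:
    requests:
      storage: 10Gi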
addons/csi-ceph/role/templates/ceph-dashboard-ingress.yaml.j2 (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rook-ceph-mgr-dashboard
  namespace: {{ rook_ceph_namespace }}
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
{% if rook_ceph_dashboard_ingress_tls %}
    cert-manager.io/cluster-issuer: "{{ rook_ceph_dashboard_ingress_cert_issuer }}"
{% endif %}
spec:
  ingressClassName: "{{ rook_ceph_dashboard_ingress_class }}"
  rules:
    - host: "{{ rook_ceph_dashboard_ingress_host }}"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: rook-ceph-mgr-dashboard
                port:
                  number: 7000
{% if rook_ceph_dashboard_ingress_tls %}
  tls:
    - secretName: ceph-dashboard-tls
      hosts:
        - "{{ rook_ceph_dashboard_ingress_host }}"
{% endif %}
addons/csi-glusterfs/playbook.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
- name: Install Csi Glusterfs
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
addons/csi-glusterfs/role/defaults/main.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
---
# CSI GlusterFS Driver version
# https://github.com/gluster/gluster-csi-driver/releases
csi_glusterfs_version: "v1.0.0"
csi_glusterfs_namespace: "kube-system"

# GlusterFS REST API (Heketi) for dynamic provisioning
# Heketi has to be installed separately on the GlusterFS servers
csi_glusterfs_heketi_url: "http://heketi.glusterfs.local:8080"
csi_glusterfs_heketi_user: "admin"
csi_glusterfs_heketi_secret: "{{ vault_glusterfs_heketi_secret | default('changeme-heketi') }}"

# Cluster ID from Heketi (get it with: heketi-cli cluster list)
csi_glusterfs_cluster_id: ""

# StorageClass
csi_glusterfs_storage_class_name: "glusterfs"
csi_glusterfs_storage_class_default: false
csi_glusterfs_reclaim_policy: "Delete"  # Delete | Retain
csi_glusterfs_volume_type: "replicate:3"  # replicate:3 | none | disperse:4:2

# GlusterFS servers (hostnames/IPs of the GlusterFS cluster nodes)
# Used for direct mounts without Heketi
csi_glusterfs_endpoints: []
# Example:
# csi_glusterfs_endpoints:
#   - name: "gluster01"
#     ip: "192.168.1.10"
#   - name: "gluster02"
#     ip: "192.168.1.11"
#   - name: "gluster03"
#     ip: "192.168.1.12"
addons/csi-glusterfs/role/tasks/main.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
---
# Install glusterfs-client on every cluster node
- name: Install GlusterFS client on all K3S nodes
  ansible.builtin.apt:
    name:
      - glusterfs-client
      - attr
    state: present
    update_cache: true
  become: true
  delegate_to: "{{ item }}"
  loop: "{{ groups['k3s_master'] + groups.get('k3s_worker', []) }}"
  run_once: false

- name: Load fuse kernel module
  community.general.modprobe:
    name: fuse
    state: present
  become: true
  delegate_to: "{{ item }}"
  loop: "{{ groups['k3s_master'] + groups.get('k3s_worker', []) }}"

# Install the CSI GlusterFS Driver from the official manifests
- name: Download GlusterFS CSI Driver manifest
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/gluster/gluster-csi-driver/{{ csi_glusterfs_version }}/deploy/glusterfs-csi-driver.yaml"
    dest: /tmp/glusterfs-csi-driver.yaml
    mode: '0644'
  failed_when: false

- name: Apply GlusterFS CSI Driver manifest
  ansible.builtin.command: k3s kubectl apply -f /tmp/glusterfs-csi-driver.yaml
  changed_when: true
  failed_when: false

- name: Template GlusterFS StorageClass and resources
  ansible.builtin.template:
    src: glusterfs-resources.yaml.j2
    dest: /tmp/glusterfs-resources.yaml
    mode: '0644'

- name: Apply GlusterFS resources (Secret, Endpoints, StorageClass)
  ansible.builtin.command: k3s kubectl apply -f /tmp/glusterfs-resources.yaml
  changed_when: true

- name: Show CSI-GlusterFS access info
  ansible.builtin.debug:
    msg:
      - "CSI GlusterFS installed"
      - "StorageClass: {{ csi_glusterfs_storage_class_name }}"
      - "Heketi URL: {{ csi_glusterfs_heketi_url }}"
      - "Volume type: {{ csi_glusterfs_volume_type }}"
      - ""
      - "IMPORTANT: requires an external GlusterFS cluster with the Heketi REST API!"
      - "  1. Install GlusterFS on dedicated servers"
      - "  2. Install Heketi for REST API volume management"
      - "  3. Set csi_glusterfs_heketi_url and vault_glusterfs_heketi_secret"
      - "  4. For cluster_id: heketi-cli cluster list"
      - ""
      - "Example PVC:"
      - "  storageClassName: {{ csi_glusterfs_storage_class_name }}"
      - "  accessModes: [ReadWriteMany]"
      - "  resources.requests.storage: 10Gi"
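The debug output above only hints at a PVC; written out in full (assuming the default StorageClass name from the role defaults), it would be roughly:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-shared
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: glusterfs   # csi_glusterfs_storage_class_name default
  resources:
    requests:
      storage: 10Gi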
addons/csi-glusterfs/role/templates/glusterfs-resources.yaml.j2 (new file, 58 lines)
@@ -0,0 +1,58 @@
---
# Heketi credentials Secret
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: {{ csi_glusterfs_namespace }}
type: kubernetes.io/glusterfs
stringData:
  key: "{{ csi_glusterfs_heketi_secret }}"
---
# GlusterFS Endpoints (direct addresses of the GlusterFS cluster nodes)
{% if csi_glusterfs_endpoints %}
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
  namespace: {{ csi_glusterfs_namespace }}
subsets:
  - addresses:
{% for ep in csi_glusterfs_endpoints %}
      - ip: "{{ ep.ip }}"
        hostname: "{{ ep.name }}"
{% endfor %}
    ports:
      - port: 1
        protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-cluster
  namespace: {{ csi_glusterfs_namespace }}
spec:
  ports:
    - port: 1
      protocol: TCP
{% endif %}
---
# StorageClass with the Heketi provisioner
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ csi_glusterfs_storage_class_name }}
  annotations:
    storageclass.kubernetes.io/is-default-class: "{{ csi_glusterfs_storage_class_default | lower }}"
provisioner: gluster.org/glusterfile
reclaimPolicy: {{ csi_glusterfs_reclaim_policy }}
allowVolumeExpansion: true
parameters:
  resturl: "{{ csi_glusterfs_heketi_url }}"
  restuser: "{{ csi_glusterfs_heketi_user }}"
  secretNamespace: "{{ csi_glusterfs_namespace }}"
  secretName: "heketi-secret"
{% if csi_glusterfs_cluster_id %}
  clusterid: "{{ csi_glusterfs_cluster_id }}"
{% endif %}
  volumetype: "{{ csi_glusterfs_volume_type }}"
addons/csi-s3/playbook.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
- name: Install Csi S3
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
addons/csi-s3/role/defaults/main.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
csi_s3_version: "0.4.0"
csi_s3_namespace: "kube-system"
csi_s3_chart_repo: "https://ctrox.github.io/csi-s3/"

# Mounter: geesefs (recommended) | s3fs | rclone
csi_s3_mounter: "geesefs"

# StorageClass
csi_s3_storage_class_name: "csi-s3"
csi_s3_storage_class_default: false
csi_s3_reclaim_policy: "Delete"  # Delete | Retain

# S3/MinIO endpoint - taken automatically from addon_minio when it is installed
csi_s3_endpoint: "{{ 'http://minio.minio.svc.cluster.local:9000' if addon_minio | default(false) | bool else '' }}"
csi_s3_access_key: "{{ vault_minio_root_user | default('') }}"
csi_s3_secret_key: "{{ vault_minio_root_password | default('') }}"
csi_s3_region: "us-east-1"

# Bucket name for CSI (if empty, a bucket is created dynamically per PVC)
csi_s3_bucket: ""
addons/csi-s3/role/tasks/main.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
---
- name: Add csi-s3 Helm repo
  kubernetes.core.helm_repository:
    name: csi-s3
    repo_url: "{{ csi_s3_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install csi-s3 via Helm
  kubernetes.core.helm:
    name: csi-s3
    chart_ref: csi-s3/csi-s3
    chart_version: "{{ csi_s3_version }}"
    release_namespace: "{{ csi_s3_namespace }}"
    create_namespace: false
    wait: true
    timeout: "5m0s"
    values:
      secret:
        create: true
        name: csi-s3-secret
        accessKey: "{{ csi_s3_access_key }}"
        secretKey: "{{ csi_s3_secret_key }}"
        endpoint: "{{ csi_s3_endpoint }}"
        region: "{{ csi_s3_region }}"
        bucket: "{{ csi_s3_bucket }}"

      storageClass:
        create: true
        name: "{{ csi_s3_storage_class_name }}"
        singleBucket: "{{ csi_s3_bucket }}"
        mounter: "{{ csi_s3_mounter }}"
        reclaimPolicy: "{{ csi_s3_reclaim_policy }}"
        annotations:
          storageclass.kubernetes.io/is-default-class: "{{ csi_s3_storage_class_default | lower }}"

      driver:
        tolerations:
          - operator: "Exists"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Show CSI-S3 access info
  ansible.builtin.debug:
    msg:
      - "CSI-S3 installed"
      - "StorageClass: {{ csi_s3_storage_class_name }}"
      - "Endpoint: {{ csi_s3_endpoint }}"
      - "Mounter: {{ csi_s3_mounter }}"
      - "Example PVC:"
      - "  storageClassName: {{ csi_s3_storage_class_name }}"
      - "  accessModes: [ReadWriteMany]"
      - "  resources.requests.storage: 5Gi"
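Expanded from the debug hint above, a complete PVC against the csi-s3 class would look roughly like this (assuming the default class name and an S3/MinIO endpoint configured as in the defaults):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: s3-bucket-volume
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: csi-s3      # csi_s3_storage_class_name default
  resources:
    requests:
      storage: 5Gi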
addons/vaultwarden/playbook.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
- name: Install Vaultwarden
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
addons/vaultwarden/role/defaults/main.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
---
vaultwarden_version: ""  # "" = latest chart version is picked automatically
vaultwarden_namespace: "vaultwarden"
vaultwarden_chart_repo: "https://guerzon.github.io/vaultwarden/"

# ── Main settings ─────────────────────────────────────────────────────────────
# Public Vaultwarden URL (used for WebAuthn, email links, etc.)
vaultwarden_domain: "https://vault.antropoff.ru"

# Registration of new users
vaultwarden_signups_allowed: false

# WebSocket (needed for live notifications in the clients)
vaultwarden_websocket_enabled: true

# ── Admin Panel ───────────────────────────────────────────────────────────────
# Access token for /admin - set it in vault.yml: vault_vaultwarden_admin_token
# Leave it empty to disable the admin panel
vaultwarden_admin_token: "{{ vault_vaultwarden_admin_token | default('') }}"

# ── SMTP ──────────────────────────────────────────────────────────────────────
vaultwarden_smtp_enabled: false  # enable only when the SMTP settings are filled in
vaultwarden_smtp_host: "smtp.yandex.ru"
vaultwarden_smtp_from: "vault@antropoff.ru"
vaultwarden_smtp_from_name: "Vaultwarden"
vaultwarden_smtp_port: 465
vaultwarden_smtp_security: "force_tls"  # force_tls | starttls | off
vaultwarden_smtp_username: "sergey@antropoff.ru"
# The password is set in vault.yml: vault_vaultwarden_smtp_password
vaultwarden_smtp_password: "{{ vault_vaultwarden_smtp_password | default('') }}"

# ── Ingress ───────────────────────────────────────────────────────────────────
vaultwarden_ingress_enabled: true
vaultwarden_ingress_host: "vault.antropoff.ru"
vaultwarden_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
vaultwarden_ingress_tls: true
vaultwarden_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"

# ── Storage ───────────────────────────────────────────────────────────────────
vaultwarden_storage_size: "1Gi"
vaultwarden_storage_class: ""

# ── Metrics ───────────────────────────────────────────────────────────────────
vaultwarden_metrics_enabled: true
# The ServiceMonitor is created only when addon_prometheus_stack: true

vaultwarden_resources:
  requests:
    cpu: 50m
    memory: 64Mi
  limits:
    cpu: 300m
    memory: 256Mi
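A deployment that does not use the antropoff.ru defaults would likely override at least the following variables (the names come from the defaults above; the values here are placeholders):

vaultwarden_domain: "https://vault.example.com"
vaultwarden_ingress_host: "vault.example.com"
vaultwarden_smtp_enabled: true
vaultwarden_smtp_from: "vault@example.com"
vaultwarden_smtp_username: "user@example.com"
# plus vault_vaultwarden_admin_token and vault_vaultwarden_smtp_password in vault.yml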
addons/vaultwarden/role/tasks/main.yml (new file, 59 lines)
@@ -0,0 +1,59 @@
---
- name: Add Vaultwarden Helm repo
  kubernetes.core.helm_repository:
    name: vaultwarden
    repo_url: "{{ vaultwarden_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Get latest Vaultwarden chart version
  ansible.builtin.shell: |
    helm search repo vaultwarden/vaultwarden --output json | \
      python3 -c "import sys,json; print(json.load(sys.stdin)[0]['version'])"
  register: _vaultwarden_latest_version
  changed_when: false
  when: vaultwarden_version == ""
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Set Vaultwarden chart version
  ansible.builtin.set_fact:
    _vaultwarden_version: "{{ vaultwarden_version if vaultwarden_version != '' else _vaultwarden_latest_version.stdout | trim }}"

- name: Template Vaultwarden values
  ansible.builtin.template:
    src: vaultwarden-values.yaml.j2
    dest: /tmp/vaultwarden-values.yaml
    mode: '0600'

- name: Install Vaultwarden via Helm
  kubernetes.core.helm:
    name: vaultwarden
    chart_ref: vaultwarden/vaultwarden
    chart_version: "{{ _vaultwarden_version }}"
    release_namespace: "{{ vaultwarden_namespace }}"
    create_namespace: true
    wait: true
    timeout: "5m0s"
    values_files:
      - /tmp/vaultwarden-values.yaml
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Wait for Vaultwarden to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ vaultwarden_namespace }}
    rollout status deployment/vaultwarden --timeout=120s
  changed_when: false
  retries: 3
  delay: 10

- name: Show Vaultwarden access info
  ansible.builtin.debug:
    msg:
      - "Vaultwarden installed in namespace: {{ vaultwarden_namespace }}"
      - "URL: {{ vaultwarden_domain }}"
      - "Admin panel: {{ vaultwarden_domain }}/admin"
      - "{% if vaultwarden_admin_token %}Admin token is set (from vault.yml){% else %}Admin panel disabled (admin_token not set){% endif %}"
      - "Signups: {{ 'allowed' if vaultwarden_signups_allowed else 'disabled' }}"
      - "SMTP: {{ 'enabled (' + vaultwarden_smtp_host + ':' + vaultwarden_smtp_port | string + ')' if vaultwarden_smtp_enabled else 'disabled' }}"
addons/vaultwarden/role/templates/vaultwarden-values.yaml.j2 (new file, 59 lines)
@@ -0,0 +1,59 @@
domain: "{{ vaultwarden_domain }}"

signupsAllowed: {{ vaultwarden_signups_allowed | lower }}

websocket:
  enabled: {{ vaultwarden_websocket_enabled | lower }}

adminToken:
  value: "{{ vaultwarden_admin_token }}"

smtp:
  enabled: {{ vaultwarden_smtp_enabled | lower }}
{% if vaultwarden_smtp_enabled | bool %}
  host: "{{ vaultwarden_smtp_host }}"
  from: "{{ vaultwarden_smtp_from }}"
  fromName: "{{ vaultwarden_smtp_from_name }}"
  port: {{ vaultwarden_smtp_port }}
  security: "{{ vaultwarden_smtp_security }}"
  username: "{{ vaultwarden_smtp_username }}"
  password: "{{ vaultwarden_smtp_password }}"
{% endif %}

ingress:
  enabled: {{ vaultwarden_ingress_enabled | lower }}
{% if vaultwarden_ingress_enabled | bool %}
  hostname: "{{ vaultwarden_ingress_host }}"
  ingressClassName: "{{ vaultwarden_ingress_class }}"
  tls: {{ vaultwarden_ingress_tls | lower }}
{% if vaultwarden_ingress_tls | bool %}
  annotations:
    cert-manager.io/cluster-issuer: "{{ vaultwarden_ingress_cert_issuer }}"
  tlsSecret: "vaultwarden-tls"
{% endif %}
{% endif %}

storage:
  data:
    accessMode: ReadWriteOnce
    size: "{{ vaultwarden_storage_size }}"
{% if vaultwarden_storage_class %}
    storageClass: "{{ vaultwarden_storage_class }}"
{% endif %}

metrics:
  enabled: {{ vaultwarden_metrics_enabled | lower }}
{% if vaultwarden_metrics_enabled | bool %}
  serviceMonitor:
    enabled: {{ (addon_prometheus_stack | default(false)) | lower }}
    labels:
      release: kube-prometheus-stack
{% endif %}

resources:
  requests:
    cpu: "{{ vaultwarden_resources.requests.cpu }}"
    memory: "{{ vaultwarden_resources.requests.memory }}"
  limits:
    cpu: "{{ vaultwarden_resources.limits.cpu }}"
    memory: "{{ vaultwarden_resources.limits.memory }}"
group_vars/all/addons.yml
@@ -29,6 +29,10 @@ addon_loki: false  # Loki (log aggregation)
addon_promtail: false       # Promtail (log collection agent → Loki)
addon_tempo: false          # Tempo (distributed tracing)
addon_pushgateway: false    # Prometheus Pushgateway (metrics from batch jobs)
addon_csi_s3: false         # CSI S3 Driver (object storage as PVCs, auto-MinIO)
addon_csi_ceph: false       # CSI Ceph / Rook-Ceph (distributed block + filesystem storage)
addon_csi_glusterfs: false  # CSI GlusterFS Driver (requires an external GlusterFS + Heketi)
addon_vaultwarden: false    # Vaultwarden (self-hosted Bitwarden-compatible password manager)

# ─── NFS Server ───────────────────────────────────────────────────────────────
nfs_exports:
@@ -241,6 +245,48 @@ minio_api_ingress_host: "s3.example.com"
# pushgateway_ingress_host: "pushgateway.example.com"
# pushgateway_persistence_enabled: false  # keep metrics across restarts

# ─── CSI S3 ───────────────────────────────────────────────────────────────────
# Mounts S3/MinIO buckets as PersistentVolumes in pods.
# With addon_minio: true it automatically uses the internal MinIO.
# csi_s3_mounter: "geesefs"  # geesefs | s3fs | rclone
# csi_s3_endpoint: ""        # auto-MinIO when addon_minio: true
# csi_s3_access_key: ""      # taken from vault_minio_root_user
# csi_s3_secret_key: ""      # taken from vault_minio_root_password

# ─── CSI Ceph / Rook-Ceph ─────────────────────────────────────────────────────
# Distributed block (RWO) and filesystem (RWX) storage on top of Ceph.
# Requires at least 3 nodes with unused disks for the Ceph OSDs.
# rook_ceph_mon_count: 3
# rook_ceph_block_replica_count: 3  # set to 1 for single-node
# rook_ceph_block_storage_class: "rook-ceph-block"
# rook_ceph_filesystem_storage_class: "rook-ceph-filesystem"
# rook_ceph_dashboard_ingress_enabled: false
# rook_ceph_dashboard_ingress_host: "ceph.example.com"

# ─── CSI GlusterFS ────────────────────────────────────────────────────────────
# Requires an external GlusterFS cluster + Heketi REST API.
# csi_glusterfs_heketi_url: "http://heketi.glusterfs.local:8080"
# csi_glusterfs_cluster_id: ""  # heketi-cli cluster list
# csi_glusterfs_volume_type: "replicate:3"
# csi_glusterfs_endpoints:      # direct mounts without Heketi
#   - name: "gluster01"
#     ip: "192.168.1.10"

# ─── Vaultwarden ──────────────────────────────────────────────────────────────
# Self-hosted Bitwarden-compatible password manager.
# Secrets are set in vault.yml:
#   vault_vaultwarden_admin_token: "..."   # access token for /admin
#   vault_vaultwarden_smtp_password: "..." # SMTP password
# vaultwarden_domain: "https://vault.example.com"
# vaultwarden_signups_allowed: false
# vaultwarden_ingress_host: "vault.example.com"
# vaultwarden_smtp_enabled: false
# vaultwarden_smtp_host: "smtp.yandex.ru"
# vaultwarden_smtp_from: "vault@example.com"
# vaultwarden_smtp_port: 465
# vaultwarden_smtp_security: "force_tls"  # force_tls | starttls | off
# vaultwarden_smtp_username: "user@example.com"

# ─── etcd backup ──────────────────────────────────────────────────────────────
etcd_backup_dir: "{{ k3s_data_dir }}/server/db/snapshots"
etcd_backup_retention: 5  # how many snapshots to keep
vault.yml.example
@@ -62,3 +62,10 @@ vault_owncloud_admin_password: "changeme-owncloud"
# ─── Nextcloud ────────────────────────────────────────────────────────────────
vault_nextcloud_admin_password: "changeme-nextcloud"
vault_nextcloud_db_password: "changeme-nextcloud-db"  # password of the 'nextcloud' user in PostgreSQL

# ─── Vaultwarden ──────────────────────────────────────────────────────────────
vault_vaultwarden_admin_token: "9R*eQ;G1M#)+Uw(afhoJ"  # token for the /admin panel
vault_vaultwarden_smtp_password: "fntwztnkacanpbwa"    # SMTP password (Yandex App Password)

# ─── CSI GlusterFS / Heketi ───────────────────────────────────────────────────
vault_glusterfs_heketi_secret: "changeme-heketi"  # Heketi admin password
playbooks/addons.yml
@@ -135,3 +135,99 @@
    when: addon_crowdsec | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/crowdsec/role"

- name: Install Harbor
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_harbor | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/harbor/role"

- name: Install Gitea
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_gitea | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/gitea/role"

- name: Install ownCloud OCIS
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_owncloud | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/owncloud/role"

- name: Install Nextcloud
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_nextcloud | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/nextcloud/role"

- name: Install Loki
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_loki | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/loki/role"

- name: Install Promtail
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_promtail | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/promtail/role"

- name: Install Tempo
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_tempo | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/tempo/role"

- name: Install Pushgateway
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_pushgateway | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/pushgateway/role"

- name: Install CSI S3 Driver
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_csi_s3 | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/csi-s3/role"

- name: Install Rook-Ceph
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_csi_ceph | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/csi-ceph/role"

- name: Install CSI GlusterFS Driver
  hosts: k3s_master[0]
  gather_facts: true
  become: true
  when: addon_csi_glusterfs | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/csi-glusterfs/role"

- name: Install Vaultwarden
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  when: addon_vaultwarden | default(false) | bool
  roles:
    - role: "{{ playbook_dir }}/../addons/vaultwarden/role"