Small fixes
6 addons/argocd/playbook.yml Normal file
@@ -0,0 +1,6 @@
---
- name: Install ArgoCD
  hosts: k3s_master[0]
  gather_facts: false
  roles:
    - role: "{{ playbook_dir }}/role"
21 addons/argocd/role/defaults/main.yml Normal file
@@ -0,0 +1,21 @@
---
argocd_version: "7.3.11"
argocd_namespace: "argocd"
argocd_chart_repo: "https://argoproj.github.io/argo-helm"

# Disables built-in TLS in argocd-server; required when running behind an Ingress
argocd_insecure: true

argocd_ingress_enabled: false
argocd_ingress_host: "argocd.example.com"
argocd_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
argocd_ingress_tls: "{{ cert_manager_enabled | default(false) | bool }}"
argocd_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"

argocd_resources:
  requests:
    cpu: 50m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 512Mi
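These defaults are meant to be overridden per environment. A minimal sketch of an inventory override that turns on the Ingress (the file path and hostname are placeholders, not part of this commit):

# group_vars/all/argocd.yml (hypothetical location)
argocd_ingress_enabled: true
argocd_ingress_host: "argocd.lab.example.com"  # placeholder hostname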
78 addons/argocd/role/tasks/main.yml Normal file
@@ -0,0 +1,78 @@
---
- name: Add argo Helm repo
  kubernetes.core.helm_repository:
    name: argo
    repo_url: "{{ argocd_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install ArgoCD via Helm
  kubernetes.core.helm:
    name: argocd
    chart_ref: argo/argo-cd
    chart_version: "{{ argocd_version }}"
    release_namespace: "{{ argocd_namespace }}"
    create_namespace: true
    wait: true
    timeout: "10m0s"
    values:
      global:
        logging:
          level: warn
      server:
        insecure: "{{ argocd_insecure | bool }}"
        resources: "{{ argocd_resources }}"
      repoServer:
        resources: "{{ argocd_resources }}"
      applicationSet:
        resources: "{{ argocd_resources }}"
      notifications:
        resources: "{{ argocd_resources }}"
      redis:
        resources:
          requests:
            cpu: 25m
            memory: 32Mi
          limits:
            cpu: 200m
            memory: 128Mi
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Create ArgoCD Ingress
  ansible.builtin.template:
    src: ingress.yaml.j2
    dest: /tmp/argocd-ingress.yaml
    mode: '0644'
  when: argocd_ingress_enabled | bool

- name: Apply ArgoCD Ingress
  ansible.builtin.command: k3s kubectl apply -f /tmp/argocd-ingress.yaml
  changed_when: true
  when: argocd_ingress_enabled | bool

- name: Get ArgoCD initial admin password
  ansible.builtin.command: >
    k3s kubectl -n {{ argocd_namespace }} get secret argocd-initial-admin-secret
    -o jsonpath={.data.password}
  register: argocd_password_b64
  changed_when: false
  failed_when: false

- name: Show ArgoCD access info
  ansible.builtin.debug:
    msg: >
      ArgoCD is installed.

      {% if argocd_ingress_enabled | bool %}
      URL: http{{ 's' if argocd_ingress_tls | bool else '' }}://{{ argocd_ingress_host }}
      {% else %}
      Port-forward: kubectl port-forward svc/argocd-server -n {{ argocd_namespace }} 8080:80
      Open: http://localhost:8080
      {% endif %}

      Login: admin
      Password: {{ argocd_password_b64.stdout | b64decode if argocd_password_b64.rc == 0 else '(not found; reset it via the argocd CLI)' }}

      Change the password after the first login!
      After changing it, delete the secret: kubectl -n {{ argocd_namespace }} delete secret argocd-initial-admin-secret
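Once the role has run, a minimal ArgoCD Application can serve as an end-to-end check. This is a sketch against the standard argoproj.io/v1alpha1 API; the app name and repo URL are placeholders:

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: demo-app                # hypothetical application
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example/manifests.git  # placeholder repo
    targetRevision: HEAD
    path: app
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated:
      prune: true
      selfHeal: true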
31 addons/argocd/role/templates/ingress.yaml.j2 Normal file
@@ -0,0 +1,31 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server
  namespace: {{ argocd_namespace }}
  annotations:
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
{% if argocd_ingress_tls | bool %}
    cert-manager.io/cluster-issuer: "{{ argocd_ingress_cert_issuer }}"
{% endif %}
spec:
  ingressClassName: {{ argocd_ingress_class }}
  rules:
    - host: {{ argocd_ingress_host }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server
                port:
                  name: http
{% if argocd_ingress_tls | bool %}
  tls:
    - hosts:
        - {{ argocd_ingress_host }}
      secretName: argocd-tls
{% endif %}
7 addons/cert-manager/playbook.yml Normal file
@@ -0,0 +1,7 @@
---
- name: Install cert-manager
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
28 addons/cert-manager/role/defaults/main.yml Normal file
@@ -0,0 +1,28 @@
---
cert_manager_version: "v1.15.3"
cert_manager_namespace: "cert-manager"
cert_manager_chart_repo: "https://charts.jetstack.io"

# ClusterIssuer: none | selfsigned | letsencrypt
# With letsencrypt, both issuers are created: letsencrypt-staging and letsencrypt-prod
cert_manager_issuer: "letsencrypt"

# Let's Encrypt (required when cert_manager_issuer: letsencrypt)
cert_manager_acme_email: "admin@example.com"

cert_manager_acme_servers:
  prod: "https://acme-v02.api.letsencrypt.org/directory"
  staging: "https://acme-staging-v02.api.letsencrypt.org/directory"

# ClusterIssuer name for Ingress annotations:
#   cert-manager.io/cluster-issuer: "{{ cert_manager_default_issuer_name }}"
# cert-manager renews certificates automatically 30 days before expiry; no manual action is needed.
cert_manager_default_issuer_name: "letsencrypt-prod"

cert_manager_resources:
  requests:
    cpu: 10m
    memory: 32Mi
  limits:
    cpu: 100m
    memory: 128Mi
101 addons/cert-manager/role/tasks/main.yml Normal file
@@ -0,0 +1,101 @@
---
- name: Add Jetstack Helm repo
  kubernetes.core.helm_repository:
    name: jetstack
    repo_url: "{{ cert_manager_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install cert-manager via Helm
  kubernetes.core.helm:
    name: cert-manager
    chart_ref: jetstack/cert-manager
    chart_version: "{{ cert_manager_version }}"
    release_namespace: "{{ cert_manager_namespace }}"
    create_namespace: true
    wait: true
    timeout: "5m0s"
    values:
      installCRDs: true
      resources: "{{ cert_manager_resources }}"
      webhook:
        resources: "{{ cert_manager_resources }}"
      cainjector:
        resources: "{{ cert_manager_resources }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  register: cert_manager_deploy

- name: Wait for cert-manager webhook to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ cert_manager_namespace }} rollout status
    deployment/cert-manager-webhook --timeout=120s
  changed_when: false
  retries: 5
  delay: 10

- name: Create self-signed ClusterIssuer
  ansible.builtin.template:
    src: clusterissuer-selfsigned.yaml.j2
    dest: /tmp/cert-manager-selfsigned-issuer.yaml
    mode: '0644'
  when: cert_manager_issuer == 'selfsigned'

- name: Apply self-signed ClusterIssuer
  ansible.builtin.command: >
    k3s kubectl apply -f /tmp/cert-manager-selfsigned-issuer.yaml
  changed_when: true
  retries: 5
  delay: 10
  when: cert_manager_issuer == 'selfsigned'

- name: Create Let's Encrypt ClusterIssuers (staging + prod)
  ansible.builtin.template:
    src: clusterissuer-letsencrypt.yaml.j2
    dest: "/tmp/cert-manager-letsencrypt-{{ item }}-issuer.yaml"
    mode: '0644'
  loop:
    - staging
    - prod
  vars:
    cert_manager_acme_server: "{{ item }}"
  when: cert_manager_issuer == 'letsencrypt'

- name: Apply Let's Encrypt ClusterIssuers (staging + prod)
  ansible.builtin.command: >
    k3s kubectl apply -f /tmp/cert-manager-letsencrypt-{{ item }}-issuer.yaml
  loop:
    - staging
    - prod
  changed_when: true
  retries: 5
  delay: 10
  when: cert_manager_issuer == 'letsencrypt'

- name: Let's Encrypt auto-renewal info
  ansible.builtin.debug:
    msg: >
      cert-manager renews certificates automatically ~30 days before expiry.
      To issue a certificate, add this annotation to an Ingress:
      cert-manager.io/cluster-issuer: "{{ cert_manager_default_issuer_name }}"
      Issuers: letsencrypt-staging (for testing), letsencrypt-prod (production certificates).
  when: cert_manager_issuer == 'letsencrypt'

- name: Verify cert-manager pods
  ansible.builtin.command: k3s kubectl -n {{ cert_manager_namespace }} get pods
  register: cm_pods
  changed_when: false

- name: Show cert-manager pods
  ansible.builtin.debug:
    msg: "{{ cm_pods.stdout_lines }}"

- name: Show ClusterIssuers
  ansible.builtin.command: k3s kubectl get clusterissuer
  register: cm_issuers
  changed_when: false
  failed_when: false

- name: Display ClusterIssuers
  ansible.builtin.debug:
    msg: "{{ cm_issuers.stdout_lines }}"

17 addons/cert-manager/role/templates/clusterissuer-letsencrypt.yaml.j2 Normal file
@@ -0,0 +1,17 @@
---
# Let's Encrypt ClusterIssuer ({{ cert_manager_acme_server }})
# Requires: a public domain + HTTP-01 challenge via ingress-nginx
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-{{ cert_manager_acme_server }}
spec:
  acme:
    server: "{{ cert_manager_acme_servers[cert_manager_acme_server] }}"
    email: "{{ cert_manager_acme_email }}"
    privateKeySecretRef:
      name: letsencrypt-{{ cert_manager_acme_server }}-key
    solvers:
      - http01:
          ingress:
            ingressClassName: "{{ ingress_nginx_class_name | default('nginx') }}"

35 addons/cert-manager/role/templates/clusterissuer-selfsigned.yaml.j2 Normal file
@@ -0,0 +1,35 @@
---
# Self-signed root CA for internal certificates
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned-issuer
spec:
  selfSigned: {}
---
# Self-signed CA certificate for the cluster
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: cluster-ca
  namespace: cert-manager
spec:
  isCA: true
  commonName: cluster-ca
  secretName: cluster-ca-secret
  privateKey:
    algorithm: ECDSA
    size: 256
  issuerRef:
    name: selfsigned-issuer
    kind: ClusterIssuer
    group: cert-manager.io
---
# CA Issuer: use this one to issue certificates to applications
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: cluster-ca-issuer
spec:
  ca:
    secretName: cluster-ca-secret
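To issue an application certificate from this CA chain, a Certificate resource references cluster-ca-issuer. A sketch with placeholder names:

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: myapp-internal-tls      # hypothetical name
  namespace: default
spec:
  secretName: myapp-internal-tls
  dnsNames:
    - myapp.default.svc.cluster.local
  issuerRef:
    name: cluster-ca-issuer
    kind: ClusterIssuer
    group: cert-manager.io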
7 addons/csi-nfs/playbook.yml Normal file
@@ -0,0 +1,7 @@
---
- name: Install CSI NFS Driver
  hosts: k3s_cluster
  gather_facts: true
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
29 addons/csi-nfs/role/defaults/main.yml Normal file
@@ -0,0 +1,29 @@
---
# CSI NFS Driver version
csi_nfs_version: "v4.8.0"

# Helm chart
csi_nfs_chart_repo: "https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts"
csi_nfs_chart_name: "csi-driver-nfs"
csi_nfs_namespace: "kube-system"

# NFS server: IP or hostname of the machine exporting NFS
# Usually the master node or a dedicated NFS server
csi_nfs_server: "{{ hostvars[groups['nfs_server'][0]]['ansible_host'] | default(hostvars[groups['k3s_master'][0]]['ansible_host']) }}"

# Base export path on the NFS server
csi_nfs_share: "/storage/nfs"

# StorageClass settings
# The name includes the NFS server hostname: nfs-master01, nfs-storage01, etc.
csi_nfs_storageclass_name: "nfs-{{ groups['nfs_server'][0] | default(groups['k3s_master'][0]) }}"
csi_nfs_storageclass_default: true
csi_nfs_reclaim_policy: "Delete"  # Delete | Retain
csi_nfs_volume_binding_mode: "Immediate"

# Per-PVC subdirectory mounting
# onDelete: delete | retain | archive
csi_nfs_on_delete: "delete"

# nfs-common is required on every node for mounting
csi_nfs_install_client: true
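With these defaults, any PVC that names the StorageClass gets its own subdirectory on the export. A sketch, assuming the class ended up named nfs-master01:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-data               # hypothetical claim
  namespace: default
spec:
  accessModes:
    - ReadWriteMany             # NFS supports shared read-write mounts
  storageClassName: nfs-master01   # placeholder; matches csi_nfs_storageclass_name
  resources:
    requests:
      storage: 5Gi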
8 addons/csi-nfs/role/meta/main.yml Normal file
@@ -0,0 +1,8 @@
---
galaxy_info:
  author: "your-name"
  description: "Deploy CSI NFS Driver and StorageClass for K3S"
  license: "MIT"
  min_ansible_version: "2.12"
dependencies:
  - role: nfs-server
97 addons/csi-nfs/role/tasks/main.yml Normal file
@@ -0,0 +1,97 @@
---
- name: Install NFS client on all K3S nodes
  ansible.builtin.apt:
    name: nfs-common
    state: present
    update_cache: true
  become: true
  when: csi_nfs_install_client
  # Runs on ALL cluster nodes (master + workers)

- name: Add CSI NFS Helm repo
  kubernetes.core.helm_repository:
    name: "{{ csi_nfs_chart_name }}"
    repo_url: "{{ csi_nfs_chart_repo }}"
  run_once: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  become: true

- name: Deploy CSI NFS Driver via Helm
  kubernetes.core.helm:
    name: "{{ csi_nfs_chart_name }}"
    chart_ref: "{{ csi_nfs_chart_name }}/{{ csi_nfs_chart_name }}"
    chart_version: "{{ csi_nfs_version }}"
    release_namespace: "{{ csi_nfs_namespace }}"
    create_namespace: false
    wait: true
    timeout: "5m0s"
    values:
      controller:
        replicas: 1
        resources:
          limits:
            cpu: 200m
            memory: 200Mi
          requests:
            cpu: 10m
            memory: 20Mi
      node:
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 10m
            memory: 20Mi
  run_once: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  become: true

- name: Deploy NFS StorageClass
  ansible.builtin.template:
    src: storageclass.yaml.j2
    dest: /tmp/nfs-storageclass.yaml
    mode: '0644'
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true

- name: Apply NFS StorageClass
  ansible.builtin.command: >
    k3s kubectl apply -f /tmp/nfs-storageclass.yaml
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  changed_when: true

- name: Verify CSI NFS pods are running
  ansible.builtin.command: >
    k3s kubectl -n {{ csi_nfs_namespace }} get pods
    -l app=csi-nfs-controller
    -o jsonpath='{.items[*].status.phase}'
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: csi_pods
  until: "'Running' in csi_pods.stdout"
  retries: 20
  delay: 10
  changed_when: false

- name: Show StorageClass
  ansible.builtin.command: k3s kubectl get storageclass
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: sc_list
  changed_when: false

- name: Display StorageClasses
  ansible.builtin.debug:
    msg: "{{ sc_list.stdout_lines }}"
  run_once: true
23 addons/csi-nfs/role/templates/storageclass.yaml.j2 Normal file
@@ -0,0 +1,23 @@
---
# NFS StorageClass; managed by Ansible (roles/csi-nfs)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ csi_nfs_storageclass_name }}
  annotations:
    storageclass.kubernetes.io/is-default-class: "{{ 'true' if csi_nfs_storageclass_default else 'false' }}"
provisioner: nfs.csi.k8s.io
parameters:
  server: {{ csi_nfs_server }}
  share: {{ csi_nfs_share }}
  # Create a subdirectory per PVC (recommended)
  subDir: ${pvc.metadata.namespace}/${pvc.metadata.name}/${pv.metadata.name}
  onDelete: {{ csi_nfs_on_delete }}
reclaimPolicy: {{ csi_nfs_reclaim_policy }}
volumeBindingMode: {{ csi_nfs_volume_binding_mode }}
mountOptions:
  - nfsvers=4.1
  - hard
  - intr
  - timeo=600
  - retrans=3
7 addons/ingress-nginx/playbook.yml Normal file
@@ -0,0 +1,7 @@
---
- name: Install ingress-nginx
  hosts: k3s_cluster
  gather_facts: true
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
68 addons/ingress-nginx/role/defaults/main.yml Normal file
@@ -0,0 +1,68 @@
---
# ingress-nginx version
ingress_nginx_version: "4.10.1"  # Helm chart version
ingress_nginx_namespace: "ingress-nginx"

# Helm repo
ingress_nginx_chart_repo: "https://kubernetes.github.io/ingress-nginx"
ingress_nginx_chart_name: "ingress-nginx"

# Service type: LoadBalancer (with kube-vip) or NodePort
ingress_nginx_service_type: "LoadBalancer"

# With LoadBalancer: a static IP (from the kube-vip pool)
# Leave empty for automatic assignment
ingress_nginx_load_balancer_ip: ""

# NodePort ports (used when service_type = NodePort)
ingress_nginx_http_nodeport: 30080
ingress_nginx_https_nodeport: 30443

# Number of controller replicas
ingress_nginx_replica_count: 1

# Enable Prometheus metrics
ingress_nginx_metrics_enabled: false

# Use a DaemonSet instead of a Deployment (recommended for edge/RPi clusters)
ingress_nginx_use_daemonset: false

# Extra controller arguments
ingress_nginx_extra_args: {}
# Example:
# ingress_nginx_extra_args:
#   enable-ssl-passthrough: ""
#   default-ssl-certificate: "default/my-tls-secret"

# IngressClass
ingress_nginx_class_name: "nginx"
ingress_nginx_set_default_class: true

# ─── Custom error backend ─────────────────────────────────────────────────────
# Deploys an nginx pod with a custom error page and replaces the default backend
ingress_nginx_custom_errors_enabled: true

# Backend error codes intercepted by the controller and routed to the error backend
ingress_nginx_custom_http_errors: "400,401,403,404,405,408,413,429,500,502,503,504"

# Cluster name shown on the error page
ingress_nginx_error_cluster_name: "K3S Cluster"

# Cluster domain or description (optional)
ingress_nginx_error_cluster_domain: ""

# nginx image tag for the error-backend pod
ingress_nginx_error_backend_nginx_tag: "1.27-alpine"

# Number of error-backend replicas
ingress_nginx_error_backend_replicas: 1

# ─── Controller resources ─────────────────────────────────────────────────────
ingress_nginx_resources:
  requests:
    cpu: 100m
    memory: 90Mi
  limits:
    cpu: 500m
    memory: 256Mi
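For reference, a typical Ingress consuming this controller together with the cert-manager addon might look like the sketch below; the host, service name, and issuer are placeholders:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp                    # hypothetical application
  namespace: default
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: nginx
  rules:
    - host: myapp.example.com    # placeholder host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: myapp
                port:
                  number: 80
  tls:
    - hosts:
        - myapp.example.com
      secretName: myapp-tls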
8 addons/ingress-nginx/role/handlers/main.yml Normal file
@@ -0,0 +1,8 @@
---
- name: Restart K3S server
  ansible.builtin.systemd:
    name: k3s
    state: restarted
    daemon_reload: true
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
8 addons/ingress-nginx/role/meta/main.yml Normal file
@@ -0,0 +1,8 @@
---
galaxy_info:
  author: "your-name"
  description: "Deploy ingress-nginx controller via Helm for K3S"
  license: "MIT"
  min_ansible_version: "2.12"
dependencies:
  - role: kube-vip
133 addons/ingress-nginx/role/tasks/main.yml Normal file
@@ -0,0 +1,133 @@
---
- name: Disable K3S built-in Traefik (required before ingress-nginx)
  ansible.builtin.lineinfile:
    path: "{{ k3s_config_dir }}/config.yaml"
    line: "disable: traefik"
    regexp: "^disable:"
    state: present
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  notify: Restart K3S server
  when: not k3s_disable_traefik

- name: Flush handlers (restart K3S if Traefik was just disabled)
  ansible.builtin.meta: flush_handlers

# shell (not command) because the task relies on a pipe
- name: Ensure ingress-nginx namespace exists
  ansible.builtin.shell: >
    k3s kubectl create namespace {{ ingress_nginx_namespace }}
    --dry-run=client -o yaml | k3s kubectl apply -f -
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  changed_when: false

- name: Add ingress-nginx Helm repo
  kubernetes.core.helm_repository:
    name: "{{ ingress_nginx_chart_name }}"
    repo_url: "{{ ingress_nginx_chart_repo }}"
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Template Helm values
  ansible.builtin.template:
    src: ingress-nginx-values.yaml.j2
    dest: /tmp/ingress-nginx-values.yaml
    mode: '0644'
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true

- name: Deploy ingress-nginx via Helm
  kubernetes.core.helm:
    name: "{{ ingress_nginx_chart_name }}"
    chart_ref: "{{ ingress_nginx_chart_name }}/{{ ingress_nginx_chart_name }}"
    chart_version: "{{ ingress_nginx_version }}"
    release_namespace: "{{ ingress_nginx_namespace }}"
    create_namespace: true
    wait: true
    timeout: "5m0s"
    values_files:
      - /tmp/ingress-nginx-values.yaml
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Deploy custom error backend
  when: ingress_nginx_custom_errors_enabled | bool
  block:
    - name: Render custom error backend manifest
      ansible.builtin.template:
        src: custom-error-backend.yaml.j2
        dest: /tmp/ingress-nginx-error-backend.yaml
        mode: '0644'
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true

    - name: Apply custom error backend
      ansible.builtin.command: >
        k3s kubectl apply -f /tmp/ingress-nginx-error-backend.yaml
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      changed_when: true
      environment:
        KUBECONFIG: "{{ k3s_kubeconfig_path }}"

    - name: Wait for error backend to be ready
      ansible.builtin.command: >
        k3s kubectl -n {{ ingress_nginx_namespace }}
        rollout status deployment/ingress-nginx-errors --timeout=120s
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      changed_when: false

- name: Wait for ingress-nginx controller to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ ingress_nginx_namespace }} rollout status
    deployment/{{ ingress_nginx_chart_name }}-controller
    --timeout=180s
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: nginx_rollout
  changed_when: false
  retries: 3
  delay: 10
  until: nginx_rollout.rc == 0

- name: Get ingress-nginx service info
  ansible.builtin.command: >
    k3s kubectl -n {{ ingress_nginx_namespace }}
    get svc {{ ingress_nginx_chart_name }}-controller
    -o wide
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: nginx_svc
  changed_when: false

- name: Show ingress-nginx service
  ansible.builtin.debug:
    msg: "{{ nginx_svc.stdout_lines }}"
  run_once: true

- name: Deploy test IngressClass (verify)
  ansible.builtin.command: >
    k3s kubectl get ingressclass {{ ingress_nginx_class_name }}
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: ingress_class_check
  changed_when: false
  failed_when: false

- name: Show IngressClass status
  ansible.builtin.debug:
    msg: "IngressClass '{{ ingress_nginx_class_name }}': {{ 'OK' if ingress_class_check.rc == 0 else 'NOT FOUND' }}"
  run_once: true
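A smoke test for the custom error backend could request a nonexistent path and check that the placeholders were substituted. A sketch of an extra task (not part of this commit; the URL assumes the LoadBalancer IP is set):

- name: Smoke-test the custom error page       # hypothetical extra task
  ansible.builtin.uri:
    url: "http://{{ ingress_nginx_load_balancer_ip }}/does-not-exist"
    status_code: 404             # the controller should intercept and return 404
    return_content: true
  register: err_page
  # A raw %%CODE%% placeholder in the body means sub_filter never ran
  failed_when: "err_page.status != 404 or '%%CODE%%' in err_page.content"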
163 addons/ingress-nginx/role/templates/custom-error-backend.yaml.j2 Normal file
@@ -0,0 +1,163 @@
---
# Custom error backend for ingress-nginx
# Deployed by Ansible; do not edit manually

apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-errors-config
  namespace: {{ ingress_nginx_namespace }}
  labels:
    app.kubernetes.io/name: ingress-nginx-errors
    app.kubernetes.io/part-of: ingress-nginx
data:
  nginx.conf: |
    worker_processes 1;
    error_log /dev/stderr warn;
    pid /tmp/nginx.pid;

    events { worker_connections 256; }

    http {
      include /etc/nginx/mime.types;
      default_type text/html;
      access_log /dev/stdout;
      client_body_temp_path /tmp/client_body;
      proxy_temp_path /tmp/proxy;
      fastcgi_temp_path /tmp/fastcgi;

      # Map the error code to a human-readable message (page is localized, lang="ru")
      map $http_x_code $error_message {
        default "Произошла ошибка";
        "400" "Некорректный запрос";
        "401" "Требуется авторизация";
        "403" "Доступ запрещён";
        "404" "Страница не найдена";
        "405" "Метод не разрешён";
        "408" "Таймаут запроса";
        "413" "Запрос слишком большой";
        "429" "Слишком много запросов";
        "500" "Внутренняя ошибка сервера";
        "502" "Служба недоступна";
        "503" "Служба временно недоступна";
        "504" "Превышено время ожидания";
      }

      # If X-Code is empty (direct access to the backend), show 404
      map $http_x_code $display_code {
        "" "404";
        default $http_x_code;
      }

      server {
        listen 8080;
        server_name _;
        root /usr/share/nginx/html;

        # Health check; ingress-nginx probes the backend through this endpoint
        location /healthz {
          return 200 "ok\n";
          add_header Content-Type text/plain;
        }

        location / {
          try_files /error.html =200;

          # Substitute the code and message into the HTML via sub_filter
          sub_filter '%%CODE%%' $display_code;
          sub_filter '%%MESSAGE%%' $error_message;
          sub_filter_once off;
          add_header Cache-Control "no-cache, no-store, must-revalidate";
          add_header X-Error-Code $display_code;
        }
      }
    }

  error.html: |
    {{ lookup('template', 'custom-error-page.html.j2') | indent(4) }}

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-nginx-errors
  namespace: {{ ingress_nginx_namespace }}
  labels:
    app.kubernetes.io/name: ingress-nginx-errors
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: {{ ingress_nginx_error_backend_replicas }}
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx-errors
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx-errors
        app.kubernetes.io/part-of: ingress-nginx
    spec:
      securityContext:
        runAsNonRoot: true
        runAsUser: 101
        runAsGroup: 101
      containers:
        - name: error-backend
          image: nginx:{{ ingress_nginx_error_backend_nginx_tag }}
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: config
              mountPath: /etc/nginx/nginx.conf
              subPath: nginx.conf
            - name: config
              mountPath: /usr/share/nginx/html/error.html
              subPath: error.html
            - name: tmp
              mountPath: /tmp
          resources:
            requests:
              cpu: 10m
              memory: 16Mi
            limits:
              cpu: 50m
              memory: 32Mi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 5
      volumes:
        - name: config
          configMap:
            name: ingress-nginx-errors-config
        - name: tmp
          emptyDir: {}
      tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Exists"
          effect: "NoSchedule"

---
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-errors
  namespace: {{ ingress_nginx_namespace }}
  labels:
    app.kubernetes.io/name: ingress-nginx-errors
    app.kubernetes.io/part-of: ingress-nginx
spec:
  selector:
    app.kubernetes.io/name: ingress-nginx-errors
  ports:
    - name: http
      port: 80
      targetPort: 8080
  type: ClusterIP
162 addons/ingress-nginx/role/templates/custom-error-page.html.j2 Normal file
@@ -0,0 +1,162 @@
<!DOCTYPE html>
<html lang="ru">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>%%CODE%% — {{ ingress_nginx_error_cluster_name }}</title>
  <style>
    *, *::before, *::after { margin: 0; padding: 0; box-sizing: border-box; }

    body {
      font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto,
                   'Helvetica Neue', Arial, sans-serif;
      background: #0d1117;
      color: #e6edf3;
      min-height: 100vh;
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      padding: 1.5rem;
    }

    .card {
      background: #161b22;
      border: 1px solid #21262d;
      border-radius: 12px;
      padding: 3rem 3.5rem;
      max-width: 520px;
      width: 100%;
      text-align: center;
      box-shadow: 0 16px 48px rgba(0,0,0,0.4);
    }

    .icon {
      width: 64px;
      height: 64px;
      margin: 0 auto 1.75rem;
    }

    .code {
      font-size: 5.5rem;
      font-weight: 800;
      line-height: 1;
      letter-spacing: -0.04em;
      background: linear-gradient(135deg, #326CE5 0%, #00b4d8 100%);
      -webkit-background-clip: text;
      -webkit-text-fill-color: transparent;
      background-clip: text;
      margin-bottom: 0.5rem;
    }

    .divider {
      width: 40px;
      height: 3px;
      background: linear-gradient(90deg, #326CE5, #00b4d8);
      border-radius: 2px;
      margin: 1.25rem auto;
    }

    .message {
      font-size: 1.2rem;
      font-weight: 500;
      color: #c9d1d9;
      margin-bottom: 0.5rem;
      line-height: 1.4;
    }

    .meta {
      font-size: 0.8rem;
      color: #484f58;
      font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, monospace;
      margin-bottom: 2rem;
    }

    .meta span {
      color: #30a46c;
    }

    .actions {
      display: flex;
      gap: 0.75rem;
      justify-content: center;
      flex-wrap: wrap;
    }

    .btn {
      display: inline-flex;
      align-items: center;
      gap: 0.4rem;
      padding: 0.55rem 1.2rem;
      border-radius: 6px;
      font-size: 0.875rem;
      font-weight: 500;
      text-decoration: none;
      cursor: pointer;
      transition: background 0.15s, border-color 0.15s, color 0.15s;
      border: 1px solid transparent;
    }

    .btn-primary {
      background: #326CE5;
      color: #fff;
      border-color: #1a56c9;
    }
    .btn-primary:hover { background: #1a56c9; }

    .btn-ghost {
      background: transparent;
      color: #8b949e;
      border-color: #30363d;
    }
    .btn-ghost:hover { color: #e6edf3; border-color: #8b949e; }

    footer {
      margin-top: 2rem;
      font-size: 0.75rem;
      color: #30363d;
    }
  </style>
</head>
<body>
  <div class="card">
    <!-- Kubernetes wheel icon -->
    <svg class="icon" viewBox="0 0 64 64" fill="none" xmlns="http://www.w3.org/2000/svg">
      <circle cx="32" cy="32" r="30" stroke="#326CE5" stroke-width="3" opacity="0.4"/>
      <circle cx="32" cy="32" r="6" fill="#326CE5"/>
      <!-- 7 spokes (360/7 ≈ 51.4°) -->
      <line x1="32" y1="32" x2="32" y2="6" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <line x1="32" y1="32" x2="54" y2="18" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <line x1="32" y1="32" x2="58" y2="43" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <line x1="32" y1="32" x2="43" y2="59" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <line x1="32" y1="32" x2="21" y2="59" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <line x1="32" y1="32" x2="6" y2="43" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <line x1="32" y1="32" x2="10" y2="18" stroke="#326CE5" stroke-width="3" stroke-linecap="round"/>
      <circle cx="32" cy="6" r="3" fill="#326CE5"/>
      <circle cx="54" cy="18" r="3" fill="#326CE5"/>
      <circle cx="58" cy="43" r="3" fill="#326CE5"/>
      <circle cx="43" cy="59" r="3" fill="#326CE5"/>
      <circle cx="21" cy="59" r="3" fill="#326CE5"/>
      <circle cx="6" cy="43" r="3" fill="#326CE5"/>
      <circle cx="10" cy="18" r="3" fill="#326CE5"/>
    </svg>

    <div class="code">%%CODE%%</div>
    <div class="divider"></div>
    <div class="message">%%MESSAGE%%</div>
    <div class="meta">
      <span>{{ ingress_nginx_error_cluster_name }}</span>
      {% if ingress_nginx_error_cluster_domain %}
      · {{ ingress_nginx_error_cluster_domain }}
      {% endif %}
    </div>

    <div class="actions">
      <a href="javascript:history.back()" class="btn btn-ghost">← Назад</a>
      <a href="/" class="btn btn-primary">На главную</a>
    </div>
  </div>

  <footer>{{ ingress_nginx_error_cluster_name }} · K3S Kubernetes Cluster</footer>
</body>
</html>
106 addons/ingress-nginx/role/templates/ingress-nginx-values.yaml.j2 Normal file
@@ -0,0 +1,106 @@
## ingress-nginx Helm values
## Managed by Ansible (roles/ingress-nginx)

controller:
  ingressClassResource:
    name: "{{ ingress_nginx_class_name }}"
    enabled: true
    default: {{ ingress_nginx_set_default_class | lower }}

  ingressClass: "{{ ingress_nginx_class_name }}"

{% if ingress_nginx_use_daemonset %}
  kind: DaemonSet
{% else %}
  kind: Deployment
  replicaCount: {{ ingress_nginx_replica_count }}
{% endif %}

  service:
    type: {{ ingress_nginx_service_type }}
{% if ingress_nginx_service_type == "LoadBalancer" and ingress_nginx_load_balancer_ip %}
    loadBalancerIP: "{{ ingress_nginx_load_balancer_ip }}"
{% endif %}
{% if ingress_nginx_service_type == "NodePort" %}
    nodePorts:
      http: {{ ingress_nginx_http_nodeport }}
      https: {{ ingress_nginx_https_nodeport }}
{% endif %}

  resources:
    requests:
      cpu: "{{ ingress_nginx_resources.requests.cpu }}"
      memory: "{{ ingress_nginx_resources.requests.memory }}"
    limits:
      cpu: "{{ ingress_nginx_resources.limits.cpu }}"
      memory: "{{ ingress_nginx_resources.limits.memory }}"

  # JSON logging for easier parsing
  config:
{% if ingress_nginx_custom_errors_enabled %}
    custom-http-errors: "{{ ingress_nginx_custom_http_errors }}"
{% endif %}
    log-format-upstream: >-
      {"time":"$time_iso8601","remote_addr":"$remote_addr",
      "x_forwarded_for":"$http_x_forwarded_for","request_id":"$req_id",
      "remote_user":"$remote_user","bytes_sent":"$bytes_sent",
      "request_time":"$request_time","status":"$status",
      "vhost":"$host","request_proto":"$server_protocol",
      "path":"$uri","request_query":"$args",
      "request_length":"$request_length","duration":"$request_time",
      "method":"$request_method","http_referrer":"$http_referer",
      "http_user_agent":"$http_user_agent"}
    use-forwarded-headers: "true"
    compute-full-forwarded-for: "true"
    use-proxy-protocol: "false"
    proxy-body-size: "50m"
    proxy-read-timeout: "600"
    proxy-send-timeout: "600"

{% if ingress_nginx_extra_args or ingress_nginx_custom_errors_enabled %}
  extraArgs:
{% if ingress_nginx_custom_errors_enabled %}
    default-backend-service: "{{ ingress_nginx_namespace }}/ingress-nginx-errors"
{% endif %}
{% for key, value in ingress_nginx_extra_args.items() %}
    {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}

  metrics:
    enabled: {{ ingress_nginx_metrics_enabled | lower }}
{% if ingress_nginx_metrics_enabled %}
    serviceMonitor:
      enabled: false  # enable if a Prometheus Operator is installed
{% endif %}

  # Tolerations to run on masters and RPi nodes
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"
    - key: "node-type"
      operator: "Equal"
      value: "raspberry-pi"
      effect: "NoSchedule"

  admissionWebhooks:
    enabled: true
    failurePolicy: Fail

defaultBackend:
{% if ingress_nginx_custom_errors_enabled %}
  enabled: false  # the custom error backend is deployed separately
{% else %}
  enabled: true
  image:
    registry: registry.k8s.io
    image: defaultbackend-amd64
  resources:
    limits:
      cpu: 10m
      memory: 20Mi
    requests:
      cpu: 10m
      memory: 20Mi
{% endif %}
7 addons/istio/playbook.yml Normal file
@@ -0,0 +1,7 @@
---
- name: Install Istio + Kiali
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
60 addons/istio/role/defaults/main.yml Normal file
@@ -0,0 +1,60 @@
---
istio_version: "1.22.2"  # Helm chart version (matches the Istio version)
istio_namespace: "istio-system"

istio_chart_repo: "https://istio-release.storage.googleapis.com/charts"

# Install the Istio Ingress Gateway (LoadBalancer)
istio_install_gateway: true

# Mutual TLS mode between services
istio_mtls_mode: "STRICT"  # STRICT | PERMISSIVE | DISABLE

# istiod (control plane) resources
istio_pilot_resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 512Mi

# Gateway resources
istio_gateway_resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 256Mi

# Enable Prometheus-compatible metrics collection
istio_telemetry_enabled: true

# ─── Kiali (Service Mesh UI) ──────────────────────────────────────────────────
# Optional install (Istio must be enabled)
kiali_enabled: false

kiali_version: "1.86.0"  # Helm chart version
kiali_namespace: "{{ istio_namespace }}"
kiali_chart_repo: "https://kiali.org/helm-charts"

# Token for logging in to the Kiali UI.
# Set it in group_vars/all/vault.yml: vault_kiali_token: "your-token"
# After the first install, Ansible prints the generated token;
# copy it into vault.yml for subsequent runs.
kiali_token: "{{ vault_kiali_token | default('') }}"

# Ingress for Kiali (requires ingress-nginx)
kiali_ingress_enabled: false
kiali_ingress_host: "kiali.local"
kiali_ingress_class: "nginx"

# Kiali resources
kiali_resources:
  requests:
    cpu: 50m
    memory: 64Mi
  limits:
    cpu: 200m
    memory: 256Mi
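Sidecar injection is opt-in per namespace: istiod only injects proxies where the istio-injection label is set. A sketch with a placeholder namespace name:

apiVersion: v1
kind: Namespace
metadata:
  name: demo                     # hypothetical namespace
  labels:
    istio-injection: enabled     # istiod injects sidecars into pods created here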
6 addons/istio/role/meta/main.yml Normal file
@@ -0,0 +1,6 @@
---
galaxy_info:
  role_name: istio
  description: Deploy Istio service mesh via Helm on K3S
  min_ansible_version: "2.14"
dependencies: []
76 addons/istio/role/molecule/default/converge.yml Normal file
@@ -0,0 +1,76 @@
---
- name: Converge — istio role template tests
  hosts: all
  become: false
  gather_facts: false

  vars:
    istio_enabled: true
    istio_version: "1.22.2"
    istio_namespace: "istio-system"
    istio_mtls_mode: "STRICT"
    istio_install_gateway: true
    istio_telemetry_enabled: true

    istio_pilot_resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 500m
        memory: 512Mi

    istio_gateway_resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 500m
        memory: 256Mi

    kiali_enabled: true
    kiali_namespace: "istio-system"
    kiali_auth_strategy: "token"
    kiali_ingress_enabled: false
    kiali_ingress_host: "kiali.local"
    kiali_ingress_class: "nginx"
    kiali_resources:
      requests:
        cpu: 50m
        memory: 64Mi
      limits:
        cpu: 200m
        memory: 256Mi

    # Vars for kiali-values.yaml.j2 (prometheus-stack integration)
    prometheus_stack_enabled: true
    prometheus_stack_release_name: "prom"
    prometheus_stack_namespace: "monitoring"
    prometheus_grafana_enabled: true
    grafana_admin_user: "admin"
    prometheus_grafana_admin_password: "molecule-test-pass"

  tasks:
    - name: Render istiod Helm values
      ansible.builtin.template:
        src: "{{ playbook_dir }}/../../templates/istiod-values.yaml.j2"
        dest: /tmp/istiod-values.yaml
        mode: '0644'

    - name: Render Kiali Helm values
      ansible.builtin.template:
        src: "{{ playbook_dir }}/../../templates/kiali-values.yaml.j2"
        dest: /tmp/kiali-values.yaml
        mode: '0644'

    - name: Render PeerAuthentication manifest
      ansible.builtin.template:
        src: "{{ playbook_dir }}/../../templates/peer-authentication.yaml.j2"
        dest: /tmp/peer-authentication.yaml
        mode: '0644'

    - name: Render Kiali token secret manifest
      ansible.builtin.template:
        src: "{{ playbook_dir }}/../../templates/kiali-token-secret.yaml.j2"
        dest: /tmp/kiali-token-secret.yaml
        mode: '0644'
26 addons/istio/role/molecule/default/molecule.yml Normal file
@@ -0,0 +1,26 @@
---
driver:
  name: docker

platforms:
  # master01 is the only node needed; templates are rendered from the first master
  - name: master01
    image: geerlingguy/docker-ubuntu2204-ansible:latest
    pre_build_image: true

provisioner:
  name: ansible
  playbooks:
    converge: converge.yml
    verify: verify.yml
  config_options:
    defaults:
      interpreter_python: auto_silent

verifier:
  name: ansible

lint: |
  set -e
  yamllint .
  ansible-lint
107 addons/istio/role/molecule/default/verify.yml Normal file
@@ -0,0 +1,107 @@
---
- name: Verify — istio role templates
  hosts: all
  become: false
  gather_facts: false

  tasks:
    # ── istiod Helm values ───────────────────────────────────────────────────
    - name: Read istiod values
      ansible.builtin.slurp:
        src: /tmp/istiod-values.yaml
      register: istiod_raw

    - name: Parse istiod YAML
      ansible.builtin.set_fact:
        istiod: "{{ istiod_raw.content | b64decode | from_yaml }}"

    - name: Assert istiod pilot resources exist
      ansible.builtin.assert:
        that:
          - istiod.pilot is defined
          - istiod.pilot.resources.requests.cpu == '100m'
          - istiod.pilot.resources.limits.memory == '512Mi'
        fail_msg: "istiod pilot resources are configured incorrectly"

    - name: Assert meshConfig exists
      ansible.builtin.assert:
        that: istiod.meshConfig is defined
        fail_msg: "meshConfig is missing from istiod values"

    - name: Assert telemetry flag
      ansible.builtin.assert:
        that: istiod.meshConfig.enablePrometheusMerge == true
        fail_msg: "enablePrometheusMerge must be true when istio_telemetry_enabled=true"

    # ── Kiali Helm values ────────────────────────────────────────────────────
    - name: Read kiali values
      ansible.builtin.slurp:
        src: /tmp/kiali-values.yaml
      register: kiali_raw

    - name: Parse kiali YAML
      ansible.builtin.set_fact:
        kiali: "{{ kiali_raw.content | b64decode | from_yaml }}"

    - name: Assert kiali auth strategy is token
      ansible.builtin.assert:
        that: kiali.auth.strategy == 'token'
        fail_msg: "Kiali auth.strategy must be 'token', got: {{ kiali.auth.strategy }}"

    - name: Assert kiali external_services prometheus URL
      ansible.builtin.assert:
        that:
          - kiali.external_services.prometheus.url is defined
          - "'prom-kube-prometheus-stack-prometheus' in kiali.external_services.prometheus.url"
        fail_msg: "Kiali Prometheus URL is configured incorrectly: {{ kiali.external_services.prometheus.url }}"

    - name: Assert kiali grafana integration
      ansible.builtin.assert:
        that:
          - kiali.external_services.grafana.enabled == true
          - kiali.external_services.grafana.auth.username == 'admin'
        fail_msg: "Kiali Grafana integration is configured incorrectly"

    # ── PeerAuthentication ───────────────────────────────────────────────────
    - name: Read PeerAuthentication manifest
      ansible.builtin.slurp:
        src: /tmp/peer-authentication.yaml
      register: peer_raw

    - name: Parse PeerAuthentication YAML
      ansible.builtin.set_fact:
        peer_auth: "{{ peer_raw.content | b64decode | from_yaml }}"

    - name: Assert PeerAuthentication kind
      ansible.builtin.assert:
        that: peer_auth.kind == 'PeerAuthentication'
        fail_msg: "Wrong kind: {{ peer_auth.kind }}"

    - name: Assert mTLS mode is STRICT
      ansible.builtin.assert:
        that: peer_auth.spec.mtls.mode == 'STRICT'
        fail_msg: "mTLS mode must be STRICT, got: {{ peer_auth.spec.mtls.mode }}"

    # ── Kiali Token Secret ───────────────────────────────────────────────────
    - name: Read kiali token secret manifest
      ansible.builtin.slurp:
        src: /tmp/kiali-token-secret.yaml
      register: kiali_secret_raw

    - name: Parse kiali token secret YAML
      ansible.builtin.set_fact:
        kiali_secret: "{{ kiali_secret_raw.content | b64decode | from_yaml }}"

    - name: Assert kiali secret type
      ansible.builtin.assert:
        that: kiali_secret.type == 'kubernetes.io/service-account-token'
        fail_msg: "Wrong secret type: {{ kiali_secret.type }}"

    - name: Assert kiali secret annotation
      ansible.builtin.assert:
        that: kiali_secret.metadata.annotations['kubernetes.io/service-account.name'] == 'kiali-admin'
        fail_msg: "Wrong service-account annotation"

    - name: Summary
      ansible.builtin.debug:
        msg: "All istio/kiali checks passed"
257 addons/istio/role/tasks/main.yml Normal file
@@ -0,0 +1,257 @@
|
||||
---
|
||||
- name: Add Istio Helm repo
|
||||
kubernetes.core.helm_repository:
|
||||
name: istio
|
||||
repo_url: "{{ istio_chart_repo }}"
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
environment:
|
||||
KUBECONFIG: "{{ k3s_kubeconfig_path }}"
|
||||
|
||||
- name: Create istio-system namespace
|
||||
ansible.builtin.command: >
|
||||
k3s kubectl create namespace {{ istio_namespace }}
|
||||
--dry-run=client -o yaml | k3s kubectl apply -f -
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
changed_when: false
|
||||
|
||||
- name: Install Istio base CRDs (istio/base)
|
||||
kubernetes.core.helm:
|
||||
name: istio-base
|
||||
chart_ref: istio/base
|
||||
chart_version: "{{ istio_version }}"
|
||||
release_namespace: "{{ istio_namespace }}"
|
||||
create_namespace: false
|
||||
wait: true
|
||||
timeout: "5m0s"
|
||||
values:
|
||||
defaultRevision: default
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
environment:
|
||||
KUBECONFIG: "{{ k3s_kubeconfig_path }}"
|
||||
|
||||
- name: Template istiod values
|
||||
ansible.builtin.template:
|
||||
src: istiod-values.yaml.j2
|
||||
dest: /tmp/istiod-values.yaml
|
||||
mode: '0644'
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
|
||||
- name: Install istiod (control plane)
|
||||
kubernetes.core.helm:
|
||||
name: istiod
|
||||
chart_ref: istio/istiod
|
||||
chart_version: "{{ istio_version }}"
|
||||
release_namespace: "{{ istio_namespace }}"
|
||||
create_namespace: false
|
||||
wait: true
|
||||
timeout: "5m0s"
|
||||
values_files:
|
||||
- /tmp/istiod-values.yaml
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
environment:
|
||||
KUBECONFIG: "{{ k3s_kubeconfig_path }}"
|
||||
|
||||
- name: Wait for istiod to be ready
|
||||
ansible.builtin.command: >
|
||||
k3s kubectl -n {{ istio_namespace }}
|
||||
rollout status deployment/istiod --timeout=180s
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
register: istiod_ready
|
||||
changed_when: false
|
||||
retries: 3
|
||||
delay: 10
|
||||
until: istiod_ready.rc == 0
|
||||
|
||||
- name: Install Istio Gateway
|
||||
kubernetes.core.helm:
|
||||
name: istio-ingressgateway
|
||||
chart_ref: istio/gateway
|
||||
chart_version: "{{ istio_version }}"
|
||||
release_namespace: "{{ istio_namespace }}"
|
||||
create_namespace: false
|
||||
wait: true
|
||||
timeout: "5m0s"
|
||||
values:
|
||||
resources:
|
||||
requests:
|
||||
cpu: "{{ istio_gateway_resources.requests.cpu }}"
|
||||
memory: "{{ istio_gateway_resources.requests.memory }}"
|
||||
limits:
|
||||
cpu: "{{ istio_gateway_resources.limits.cpu }}"
|
||||
memory: "{{ istio_gateway_resources.limits.memory }}"
|
||||
service:
|
||||
type: LoadBalancer
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
when: istio_install_gateway
|
||||
environment:
|
||||
KUBECONFIG: "{{ k3s_kubeconfig_path }}"
|
||||
|
||||
- name: Apply default PeerAuthentication (mTLS mode)
|
||||
ansible.builtin.template:
|
||||
src: peer-authentication.yaml.j2
|
||||
dest: /tmp/istio-peer-auth.yaml
|
||||
mode: '0644'
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
|
||||
- name: Apply PeerAuthentication to cluster
|
||||
ansible.builtin.command: >
|
||||
k3s kubectl apply -f /tmp/istio-peer-auth.yaml
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
changed_when: true
|
||||
|
||||
- name: Show Istio status
|
||||
ansible.builtin.command: >
|
||||
k3s kubectl -n {{ istio_namespace }} get pods
|
||||
become: true
|
||||
delegate_to: "{{ groups['k3s_master'][0] }}"
|
||||
run_once: true
|
||||
register: istio_pods
|
||||
changed_when: false
|
||||
|
||||
- name: Istio pods
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ istio_pods.stdout_lines }}"
|
||||
run_once: true
|
||||
|
||||
|
||||
# ─── Kiali ────────────────────────────────────────────────────────────────────
- name: Kiali — install
  when: kiali_enabled
  block:
    - name: Add Kiali Helm repo
      kubernetes.core.helm_repository:
        name: kiali
        repo_url: "{{ kiali_chart_repo }}"
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      environment:
        KUBECONFIG: "{{ k3s_kubeconfig_path }}"

    - name: Create kiali-admin ServiceAccount
      # The pipe requires the shell module; ansible.builtin.command would pass "|" as a literal argument
      ansible.builtin.shell: >
        k3s kubectl create serviceaccount kiali-admin
        -n {{ kiali_namespace }}
        --dry-run=client -o yaml | k3s kubectl apply -f -
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      changed_when: false

    - name: Bind kiali-admin to cluster-admin
      ansible.builtin.shell: >
        k3s kubectl create clusterrolebinding kiali-admin
        --clusterrole=cluster-admin
        --serviceaccount={{ kiali_namespace }}:kiali-admin
        --dry-run=client -o yaml | k3s kubectl apply -f -
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      changed_when: false

    - name: Create long-lived token secret for kiali-admin
      ansible.builtin.template:
        src: kiali-token-secret.yaml.j2
        dest: /tmp/kiali-token-secret.yaml
        mode: '0644'
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true

    - name: Apply kiali-admin token secret
      ansible.builtin.command: >
        k3s kubectl apply -f /tmp/kiali-token-secret.yaml
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      changed_when: false

    - name: Wait for k8s to populate the token
      ansible.builtin.command: >
        k3s kubectl -n {{ kiali_namespace }}
        get secret kiali-admin-token
        -o jsonpath="{.data.token}"
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      register: kiali_token_check
      until: kiali_token_check.stdout | length > 0
      retries: 10
      delay: 3
      changed_when: false

    - name: Decode Kiali login token
      ansible.builtin.set_fact:
        kiali_generated_token: "{{ kiali_token_check.stdout | b64decode }}"
      run_once: true

    - name: Template Kiali Helm values
      ansible.builtin.template:
        src: kiali-values.yaml.j2
        dest: /tmp/kiali-values.yaml
        mode: '0644'
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true

    - name: Deploy Kiali via Helm
      kubernetes.core.helm:
        name: kiali-server
        chart_ref: kiali/kiali-server
        chart_version: "{{ kiali_version }}"
        release_namespace: "{{ kiali_namespace }}"
        create_namespace: false
        wait: true
        timeout: "5m0s"
        values_files:
          - /tmp/kiali-values.yaml
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      environment:
        KUBECONFIG: "{{ k3s_kubeconfig_path }}"

    - name: Wait for Kiali to be ready
      ansible.builtin.command: >
        k3s kubectl -n {{ kiali_namespace }}
        rollout status deployment/kiali --timeout=180s
      become: true
      delegate_to: "{{ groups['k3s_master'][0] }}"
      run_once: true
      register: kiali_ready
      changed_when: false
      retries: 3
      delay: 10
      until: kiali_ready.rc == 0

    - name: Show Kiali access info
      ansible.builtin.debug:
        msg:
          - "══════════════════════════════════════════════════"
          - " Kiali UI is available via port-forward:"
          - "   kubectl -n {{ kiali_namespace }} port-forward svc/kiali 20001:20001"
          - " Open: http://localhost:20001"
          - "{% if kiali_ingress_enabled %} Or via Ingress: http://{{ kiali_ingress_host }}{% endif %}"
          - ""
          - " Authentication strategy: token"
          - " Login token:"
          - "   {{ kiali_generated_token }}"
          - ""
          - " Save the token to vault.yml:"
          - "   vault_kiali_token: <token above>"
          - "══════════════════════════════════════════════════"
      run_once: true

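For reference, the vault entry the message above asks for can look like this; a minimal sketch, assuming the group_vars/all/vault.yml convention used in the prometheus-stack defaults below (the variable name comes straight from the debug output):

# group_vars/all/vault.yml (encrypt with: ansible-vault encrypt group_vars/all/vault.yml)
vault_kiali_token: "<paste the token printed above>"
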
36
addons/istio/role/templates/istiod-values.yaml.j2
Normal file
@@ -0,0 +1,36 @@
## istiod Helm values
## Managed by Ansible (roles/istio)

pilot:
  resources:
    requests:
      cpu: "{{ istio_pilot_resources.requests.cpu }}"
      memory: "{{ istio_pilot_resources.requests.memory }}"
    limits:
      cpu: "{{ istio_pilot_resources.limits.cpu }}"
      memory: "{{ istio_pilot_resources.limits.memory }}"

  # Tolerations: allow scheduling on master nodes
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"

meshConfig:
  accessLogFile: /dev/stdout
  enableTracing: false
{% if istio_telemetry_enabled %}
  defaultConfig:
    proxyMetadata: {}
  enablePrometheusMerge: true
{% endif %}

global:
  proxy:
    resources:
      requests:
        cpu: 50m
        memory: 64Mi
      limits:
        cpu: 200m
        memory: 128Mi

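The pilot resource block above is driven entirely by istio_pilot_resources, so sizing is adjusted from inventory rather than by editing the template. A sketch of such an override (the group_vars location is an assumption; the variable name and shape come from this template):

# group_vars/all.yml (hypothetical location)
istio_pilot_resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 1Gi
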
10
addons/istio/role/templates/kiali-token-secret.yaml.j2
Normal file
@@ -0,0 +1,10 @@
## Long-lived token for the kiali-admin ServiceAccount
## Kubernetes fills in the .data.token field automatically
apiVersion: v1
kind: Secret
metadata:
  name: kiali-admin-token
  namespace: {{ kiali_namespace }}
  annotations:
    kubernetes.io/service-account.name: kiali-admin
type: kubernetes.io/service-account-token
64
addons/istio/role/templates/kiali-values.yaml.j2
Normal file
@@ -0,0 +1,64 @@
## Kiali Helm values
## Managed by Ansible (roles/istio)

auth:
  strategy: token

deployment:
  # Use the pre-created kiali-admin ServiceAccount
  service_account: kiali-admin
  resources:
    requests:
      cpu: "{{ kiali_resources.requests.cpu }}"
      memory: "{{ kiali_resources.requests.memory }}"
    limits:
      cpu: "{{ kiali_resources.limits.cpu }}"
      memory: "{{ kiali_resources.limits.memory }}"
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"

{% if kiali_ingress_enabled %}
  ingress:
    enabled: true
    class_name: "{{ kiali_ingress_class }}"
    override_yaml:
      spec:
        rules:
          - host: "{{ kiali_ingress_host }}"
            http:
              paths:
                - path: /
                  pathType: Prefix
                  backend:
                    service:
                      name: kiali
                      port:
                        number: 20001
{% endif %}

external_services:
  prometheus:
{% if prometheus_stack_enabled %}
    url: "http://{{ prometheus_stack_release_name }}-kube-prometheus-stack-prometheus.{{ prometheus_stack_namespace }}:9090"
{% else %}
    url: "http://prometheus-operated.monitoring:9090"
{% endif %}

  grafana:
    enabled: {{ prometheus_grafana_enabled | lower }}
{% if prometheus_stack_enabled and prometheus_grafana_enabled %}
    url: "http://{{ prometheus_stack_release_name }}-grafana.{{ prometheus_stack_namespace }}:80"
    auth:
      username: "{{ grafana_admin_user }}"
      password: "{{ prometheus_grafana_admin_password }}"
      type: basic
{% endif %}

istio:
  root_namespace: "{{ istio_namespace }}"

server:
  port: 20001
  web_root: /kiali

9
addons/istio/role/templates/peer-authentication.yaml.j2
Normal file
@@ -0,0 +1,9 @@
## Mesh-wide mTLS mode
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: default
  namespace: {{ istio_namespace }}
spec:
  mtls:
    mode: {{ istio_mtls_mode }}
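Because this policy lives in the Istio root namespace, it applies mesh-wide; a PeerAuthentication in a workload namespace takes precedence there. A minimal sketch of such a per-namespace relaxation (the namespace is illustrative, not part of this repo):

apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: default
  namespace: legacy-apps  # hypothetical namespace with non-mesh clients
spec:
  mtls:
    mode: PERMISSIVE
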
6
addons/kubernetes-dashboard/playbook.yml
Normal file
@@ -0,0 +1,6 @@
---
- name: Install Kubernetes Dashboard
  hosts: k3s_master[0]
  gather_facts: false
  roles:
    - role: "{{ playbook_dir }}/role"
13
addons/kubernetes-dashboard/role/defaults/main.yml
Normal file
@@ -0,0 +1,13 @@
---
dashboard_version: "7.5.0"
dashboard_namespace: "kubernetes-dashboard"
dashboard_chart_repo: "https://kubernetes.github.io/dashboard/"

dashboard_ingress_enabled: false
dashboard_ingress_host: "dashboard.example.com"
dashboard_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
dashboard_ingress_tls: "{{ cert_manager_enabled | default(false) | bool }}"
dashboard_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"

# Lifetime of the generated admin-user token (0 lets the API server pick its default; formats: 1h, 24h, 8760h)
dashboard_token_duration: "24h"
67
addons/kubernetes-dashboard/role/tasks/main.yml
Normal file
@@ -0,0 +1,67 @@
---
- name: Add kubernetes-dashboard Helm repo
  kubernetes.core.helm_repository:
    name: kubernetes-dashboard
    repo_url: "{{ dashboard_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install Kubernetes Dashboard via Helm
  kubernetes.core.helm:
    name: kubernetes-dashboard
    chart_ref: kubernetes-dashboard/kubernetes-dashboard
    chart_version: "{{ dashboard_version }}"
    release_namespace: "{{ dashboard_namespace }}"
    create_namespace: true
    wait: true
    timeout: "5m0s"
    values: {}
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Create admin ServiceAccount and ClusterRoleBinding
  ansible.builtin.template:
    src: admin-sa.yaml.j2
    dest: /tmp/dashboard-admin-sa.yaml
    mode: '0644'

- name: Apply admin ServiceAccount
  ansible.builtin.command: k3s kubectl apply -f /tmp/dashboard-admin-sa.yaml
  changed_when: true

- name: Create Dashboard Ingress
  ansible.builtin.template:
    src: ingress.yaml.j2
    dest: /tmp/dashboard-ingress.yaml
    mode: '0644'
  when: dashboard_ingress_enabled | bool

- name: Apply Dashboard Ingress
  ansible.builtin.command: k3s kubectl apply -f /tmp/dashboard-ingress.yaml
  changed_when: true
  when: dashboard_ingress_enabled | bool

- name: Generate admin token
  ansible.builtin.command: >
    k3s kubectl create token admin-user
    -n {{ dashboard_namespace }}
    --duration={{ dashboard_token_duration }}
  register: dashboard_token
  changed_when: true

- name: Show Dashboard access info
  ansible.builtin.debug:
    msg: >
      Kubernetes Dashboard is installed.

      {% if dashboard_ingress_enabled | bool %}
      URL: https://{{ dashboard_ingress_host }}
      {% else %}
      Port-forward: kubectl port-forward -n {{ dashboard_namespace }} svc/kubernetes-dashboard-kong-proxy 8443:443
      Open: https://localhost:8443
      {% endif %}

      Admin token (valid for {{ dashboard_token_duration }}):
      {{ dashboard_token.stdout }}

      Renew the token with: kubectl create token admin-user -n {{ dashboard_namespace }} --duration={{ dashboard_token_duration }}
19
addons/kubernetes-dashboard/role/templates/admin-sa.yaml.j2
Normal file
@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: {{ dashboard_namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: {{ dashboard_namespace }}
31
addons/kubernetes-dashboard/role/templates/ingress.yaml.j2
Normal file
@@ -0,0 +1,31 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: {{ dashboard_namespace }}
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
{% if dashboard_ingress_tls | bool %}
    cert-manager.io/cluster-issuer: "{{ dashboard_ingress_cert_issuer }}"
{% endif %}
spec:
  ingressClassName: {{ dashboard_ingress_class }}
  rules:
    - host: {{ dashboard_ingress_host }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kubernetes-dashboard-kong-proxy
                port:
                  number: 443
{% if dashboard_ingress_tls | bool %}
  tls:
    - hosts:
        - {{ dashboard_ingress_host }}
      secretName: dashboard-tls
{% endif %}
6
addons/longhorn/playbook.yml
Normal file
@@ -0,0 +1,6 @@
---
- name: Install Longhorn
  hosts: k3s_master[0]
  gather_facts: false
  roles:
    - role: "{{ playbook_dir }}/role"
25
addons/longhorn/role/defaults/main.yml
Normal file
@@ -0,0 +1,25 @@
---
longhorn_version: "1.7.0"
longhorn_namespace: "longhorn-system"
longhorn_chart_repo: "https://charts.longhorn.io"

# Default replica count; must not exceed the number of nodes that have a disk
longhorn_default_replica_count: 2
longhorn_data_path: "/var/lib/longhorn"

# false: do not displace the NFS StorageClass as the default
longhorn_storage_class_default: false

longhorn_ingress_enabled: false
longhorn_ingress_host: "longhorn.example.com"
longhorn_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
longhorn_ingress_tls: "{{ cert_manager_enabled | default(false) | bool }}"
longhorn_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"

longhorn_resources:
  requests:
    cpu: 25m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 512Mi
77
addons/longhorn/role/tasks/main.yml
Normal file
@@ -0,0 +1,77 @@
---
- name: Check open-iscsi
  ansible.builtin.command: systemctl is-active iscsid
  register: iscsi_status
  changed_when: false
  failed_when: false
  become: true

- name: Warn if iscsid is not running
  ansible.builtin.debug:
    msg: >
      WARNING: iscsid is not running; Longhorn may not work correctly.
      Make sure open-iscsi is installed and running:
      apt install open-iscsi && systemctl enable --now iscsid
      open-iscsi should be in k3s_common_packages (it is there by default).
  when: iscsi_status.rc != 0

- name: Add Longhorn Helm repo
  kubernetes.core.helm_repository:
    name: longhorn
    repo_url: "{{ longhorn_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install Longhorn via Helm
  kubernetes.core.helm:
    name: longhorn
    chart_ref: longhorn/longhorn
    chart_version: "{{ longhorn_version }}"
    release_namespace: "{{ longhorn_namespace }}"
    create_namespace: true
    wait: true
    timeout: "10m0s"
    values:
      defaultSettings:
        defaultReplicaCount: "{{ longhorn_default_replica_count }}"
        defaultDataPath: "{{ longhorn_data_path }}"
      persistence:
        defaultClassReplicaCount: "{{ longhorn_default_replica_count }}"
        defaultClass: "{{ longhorn_storage_class_default | bool }}"
      longhornManager:
        resources: "{{ longhorn_resources }}"
      longhornDriver:
        resources: "{{ longhorn_resources }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Create Longhorn Ingress
  ansible.builtin.template:
    src: ingress.yaml.j2
    dest: /tmp/longhorn-ingress.yaml
    mode: '0644'
  when: longhorn_ingress_enabled | bool

- name: Apply Longhorn Ingress
  ansible.builtin.command: k3s kubectl apply -f /tmp/longhorn-ingress.yaml
  changed_when: true
  when: longhorn_ingress_enabled | bool

- name: Show Longhorn access info
  ansible.builtin.debug:
    msg: >
      Longhorn is installed.

      StorageClass: longhorn (default: {{ longhorn_storage_class_default }})
      Replicas: {{ longhorn_default_replica_count }}
      Data path: {{ longhorn_data_path }}

      {% if longhorn_ingress_enabled | bool %}
      UI: http{{ 's' if longhorn_ingress_tls | bool else '' }}://{{ longhorn_ingress_host }}
      {% else %}
      Port-forward: kubectl port-forward svc/longhorn-frontend -n {{ longhorn_namespace }} 8080:80
      Open: http://localhost:8080
      {% endif %}

      For a single-node cluster set replicas=1:
      make addon-longhorn ARGS="-e longhorn_default_replica_count=1"
31
addons/longhorn/role/templates/ingress.yaml.j2
Normal file
@@ -0,0 +1,31 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-frontend
  namespace: {{ longhorn_namespace }}
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
{% if longhorn_ingress_tls | bool %}
    cert-manager.io/cluster-issuer: "{{ longhorn_ingress_cert_issuer }}"
{% endif %}
spec:
  ingressClassName: {{ longhorn_ingress_class }}
  rules:
    - host: {{ longhorn_ingress_host }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: longhorn-frontend
                port:
                  number: 80
{% if longhorn_ingress_tls | bool %}
  tls:
    - hosts:
        - {{ longhorn_ingress_host }}
      secretName: longhorn-tls
{% endif %}
7
addons/metrics-server/playbook.yml
Normal file
@@ -0,0 +1,7 @@
---
- name: Install metrics-server
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
15
addons/metrics-server/role/defaults/main.yml
Normal file
@@ -0,0 +1,15 @@
---
metrics_server_version: "3.12.1"
metrics_server_namespace: "kube-system"
metrics_server_chart_repo: "https://kubernetes-sigs.github.io/metrics-server/"

# Required for k3s: kubelets use self-signed certificates
metrics_server_kubelet_insecure_tls: true

metrics_server_resources:
  requests:
    cpu: 25m
    memory: 32Mi
  limits:
    cpu: 100m
    memory: 128Mi
44
addons/metrics-server/role/tasks/main.yml
Normal file
@@ -0,0 +1,44 @@
---
- name: Add metrics-server Helm repo
  kubernetes.core.helm_repository:
    name: metrics-server
    repo_url: "{{ metrics_server_chart_repo }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Install metrics-server via Helm
  kubernetes.core.helm:
    name: metrics-server
    chart_ref: metrics-server/metrics-server
    chart_version: "{{ metrics_server_version }}"
    release_namespace: "{{ metrics_server_namespace }}"
    create_namespace: false
    wait: true
    timeout: "3m0s"
    values:
      # Driven by metrics_server_kubelet_insecure_tls instead of being hardcoded
      args: "{{ ['--kubelet-insecure-tls'] if metrics_server_kubelet_insecure_tls | bool else [] }}"
      resources: "{{ metrics_server_resources }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Wait for metrics-server to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ metrics_server_namespace }}
    rollout status deployment/metrics-server --timeout=120s
  register: metrics_server_ready
  changed_when: false
  retries: 3
  delay: 10
  until: metrics_server_ready.rc == 0

- name: Test node metrics (may need 60s to scrape first time)
  ansible.builtin.command: k3s kubectl top nodes
  register: top_nodes
  changed_when: false
  failed_when: false
  retries: 6
  delay: 10
  until: top_nodes.rc == 0

- name: Show node metrics
  ansible.builtin.debug:
    msg: "{{ top_nodes.stdout_lines if top_nodes.rc == 0 else ['metrics are still being collected; wait ~60s and run: kubectl top nodes'] }}"
7
addons/nfs-server/playbook.yml
Normal file
@@ -0,0 +1,7 @@
---
- name: Install NFS server
  hosts: nfs_server
  gather_facts: true
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
21
addons/nfs-server/role/defaults/main.yml
Normal file
@@ -0,0 +1,21 @@
---
# NFS exports: a list of mount points
nfs_exports:
  - path: /storage/nfs
    options: "*(rw,sync,no_subtree_check,no_root_squash)"

# Subnet allowed to reach NFS (used for the firewall rule)
nfs_allowed_network: "192.168.1.0/24"

# NFS server packages
nfs_server_packages:
  - nfs-kernel-server
  - nfs-common

# Create the export directories if they do not exist
nfs_create_export_dirs: true

# Permissions on the exported directories
nfs_export_dir_mode: "0777"
nfs_export_dir_owner: "nobody"
nfs_export_dir_group: "nogroup"
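Since nfs_exports is a plain list, extra shares are added by overriding the whole variable in host or group vars. A sketch with illustrative paths and a subnet-scoped export (values are examples, not repo defaults):

nfs_exports:
  - path: /storage/nfs
    options: "192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash)"
  - path: /storage/backups  # hypothetical second share
    options: "192.168.1.0/24(ro,sync,no_subtree_check)"
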
11
addons/nfs-server/role/handlers/main.yml
Normal file
@@ -0,0 +1,11 @@
---
- name: Re-export NFS shares
  ansible.builtin.command: exportfs -ra
  become: true
  changed_when: true

- name: Restart NFS server
  ansible.builtin.systemd:
    name: nfs-kernel-server
    state: restarted
  become: true
7
addons/nfs-server/role/meta/main.yml
Normal file
@@ -0,0 +1,7 @@
---
galaxy_info:
  author: "your-name"
  description: "Configure NFS server for Kubernetes persistent storage"
  license: "MIT"
  min_ansible_version: "2.12"
dependencies: []
58
addons/nfs-server/role/tasks/main.yml
Normal file
@@ -0,0 +1,58 @@
---
- name: Install NFS server packages
  ansible.builtin.apt:
    name: "{{ nfs_server_packages }}"
    state: present
    update_cache: true
  become: true

- name: Create NFS export directories
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    mode: "{{ nfs_export_dir_mode }}"
    owner: "{{ nfs_export_dir_owner }}"
    group: "{{ nfs_export_dir_group }}"
  loop: "{{ nfs_exports }}"
  become: true
  when: nfs_create_export_dirs

- name: Configure /etc/exports
  ansible.builtin.template:
    src: exports.j2
    dest: /etc/exports
    mode: '0644'
    backup: true
  become: true
  notify:
    - Re-export NFS shares
    - Restart NFS server

- name: Enable and start NFS server
  ansible.builtin.systemd:
    name: nfs-kernel-server
    enabled: true
    state: started
  become: true

- name: Allow NFS through UFW (if active)
  community.general.ufw:
    rule: allow
    src: "{{ nfs_allowed_network }}"
    port: "{{ item }}"
    proto: tcp
  loop:
    - "2049"  # NFS
    - "111"   # RPC portmapper
  become: true
  failed_when: false  # UFW may not be installed

- name: Verify NFS exports are active
  ansible.builtin.command: exportfs -v
  register: nfs_exportfs
  become: true
  changed_when: false

- name: Show active NFS exports
  ansible.builtin.debug:
    msg: "{{ nfs_exportfs.stdout_lines }}"
9
addons/nfs-server/role/templates/exports.j2
Normal file
@@ -0,0 +1,9 @@
# /etc/exports - managed by Ansible (roles/nfs-server)
# Manual changes will be overwritten!
#
# Format: <directory> <options>
# Documentation: man exports

{% for export in nfs_exports %}
{{ export.path }} {{ export.options }}
{% endfor %}
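With the default nfs_exports, the rendered /etc/exports ends up with a single export line:

/storage/nfs *(rw,sync,no_subtree_check,no_root_squash)
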
7
addons/prometheus-stack/playbook.yml
Normal file
@@ -0,0 +1,7 @@
---
- name: Install kube-prometheus-stack
  hosts: k3s_master[0]
  gather_facts: false
  become: true
  roles:
    - role: "{{ playbook_dir }}/role"
60
addons/prometheus-stack/role/defaults/main.yml
Normal file
@@ -0,0 +1,60 @@
---
prometheus_stack_version: "60.3.0"  # Helm chart version
prometheus_stack_namespace: "monitoring"
prometheus_stack_release_name: "prom"

prometheus_stack_chart_repo: "https://prometheus-community.github.io/helm-charts"
prometheus_stack_chart_name: "kube-prometheus-stack"

# Grafana
prometheus_grafana_enabled: true

# Grafana admin login and password.
# Recommended: set them via Ansible Vault:
#   group_vars/all/vault.yml:
#     vault_grafana_user: "admin"
#     vault_grafana_password: "your-password"
grafana_admin_user: "{{ vault_grafana_user | default('admin') }}"
prometheus_grafana_admin_password: "{{ vault_grafana_password | default('admin') }}"

prometheus_grafana_ingress_enabled: false
prometheus_grafana_ingress_host: "grafana.local"
prometheus_grafana_ingress_class: "nginx"

# Prometheus
prometheus_retention_days: 7
prometheus_storage_size: "10Gi"  # PVC size for Prometheus data
prometheus_storage_class: ""  # "" = use the default StorageClass (nfs-client)

# Grafana PVC
grafana_storage_enabled: true
grafana_storage_size: "5Gi"  # PVC size for Grafana dashboards and plugins
grafana_storage_class: ""  # "" = use the default StorageClass

# Alertmanager
prometheus_alertmanager_enabled: true
prometheus_alertmanager_storage_size: "2Gi"

# Node exporter (host metrics)
prometheus_node_exporter_enabled: true

# kube-state-metrics
prometheus_kube_state_metrics_enabled: true

# Prometheus resources
prometheus_resources:
  requests:
    cpu: 200m
    memory: 512Mi
  limits:
    cpu: 1000m
    memory: 2Gi

# Grafana resources
grafana_resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 300m
    memory: 256Mi
6
addons/prometheus-stack/role/meta/main.yml
Normal file
@@ -0,0 +1,6 @@
---
galaxy_info:
  role_name: prometheus-stack
  description: Deploy kube-prometheus-stack (Prometheus + Grafana + Alertmanager) via Helm on K3S
  min_ansible_version: "2.14"
dependencies: []
54
addons/prometheus-stack/role/molecule/default/converge.yml
Normal file
@@ -0,0 +1,54 @@
---
- name: Converge — prometheus-stack template tests
  hosts: all
  become: false
  gather_facts: false

  vars:
    prometheus_stack_enabled: true
    prometheus_stack_namespace: "monitoring"
    prometheus_stack_release_name: "prom"
    prometheus_stack_chart_name: "kube-prometheus-stack"

    prometheus_grafana_enabled: true
    grafana_admin_user: "admin"
    prometheus_grafana_admin_password: "molecule-test-pass"
    prometheus_grafana_ingress_enabled: false
    prometheus_grafana_ingress_host: "grafana.local"
    prometheus_grafana_ingress_class: "nginx"

    grafana_storage_enabled: true
    grafana_storage_size: "5Gi"
    grafana_storage_class: ""

    prometheus_retention_days: 7
    prometheus_storage_size: "10Gi"
    prometheus_storage_class: ""

    prometheus_alertmanager_enabled: true
    prometheus_alertmanager_storage_size: "2Gi"
    prometheus_node_exporter_enabled: true
    prometheus_kube_state_metrics_enabled: true

    prometheus_resources:
      requests:
        cpu: 200m
        memory: 512Mi
      limits:
        cpu: 1000m
        memory: 2Gi

    grafana_resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 300m
        memory: 256Mi

  tasks:
    - name: Render kube-prometheus-stack Helm values
      ansible.builtin.template:
        src: "{{ playbook_dir }}/../../templates/prometheus-stack-values.yaml.j2"
        dest: /tmp/prometheus-stack-values.yaml
        mode: '0644'
26
addons/prometheus-stack/role/molecule/default/molecule.yml
Normal file
@@ -0,0 +1,26 @@
---
driver:
  name: docker

platforms:
  # master01 is the only node needed; templates are rendered from the first master
  - name: master01
    image: geerlingguy/docker-ubuntu2204-ansible:latest
    pre_build_image: true

provisioner:
  name: ansible
  playbooks:
    converge: converge.yml
    verify: verify.yml
  config_options:
    defaults:
      interpreter_python: auto_silent

verifier:
  name: ansible

lint: |
  set -e
  yamllint .
  ansible-lint
80
addons/prometheus-stack/role/molecule/default/verify.yml
Normal file
@@ -0,0 +1,80 @@
---
- name: Verify — prometheus-stack templates
  hosts: all
  become: false
  gather_facts: false

  tasks:
    - name: Read rendered Helm values
      ansible.builtin.slurp:
        src: /tmp/prometheus-stack-values.yaml
      register: values_raw

    - name: Parse YAML
      ansible.builtin.set_fact:
        v: "{{ values_raw.content | b64decode | from_yaml }}"

    # ── Grafana ─────────────────────────────────────────────────────────────────
    - name: Assert grafana block exists
      ansible.builtin.assert:
        that: v.grafana is defined
        fail_msg: "grafana block is missing from the values"

    - name: Assert grafana adminUser
      ansible.builtin.assert:
        that: v.grafana.adminUser == 'admin'
        fail_msg: "grafana.adminUser is wrong: {{ v.grafana.adminUser }}"

    - name: Assert grafana adminPassword is set
      ansible.builtin.assert:
        that: v.grafana.adminPassword | length > 0
        fail_msg: "grafana.adminPassword is not set"

    - name: Assert grafana persistence is enabled
      ansible.builtin.assert:
        that:
          - v.grafana.persistence is defined
          - v.grafana.persistence.enabled == true
          - v.grafana.persistence.size == '5Gi'
        fail_msg: "grafana.persistence is misconfigured: {{ v.grafana.persistence }}"

    # ── Prometheus ──────────────────────────────────────────────────────────────
    - name: Assert prometheus block exists
      ansible.builtin.assert:
        that: v.prometheus.prometheusSpec is defined
        fail_msg: "prometheus.prometheusSpec block is missing"

    - name: Assert prometheus retention
      ansible.builtin.assert:
        that: v.prometheus.prometheusSpec.retention == '7d'
        fail_msg: "Wrong retention: {{ v.prometheus.prometheusSpec.retention }}"

    - name: Assert prometheus PVC storage
      ansible.builtin.assert:
        that:
          - v.prometheus.prometheusSpec.storageSpec is defined
          - v.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage == '10Gi'
        fail_msg: "Prometheus PVC is misconfigured"

    # ── Alertmanager ────────────────────────────────────────────────────────────
    - name: Assert alertmanager is enabled
      ansible.builtin.assert:
        that: v.alertmanager.enabled == true
        fail_msg: "alertmanager.enabled must be true"

    - name: Assert alertmanager storage
      ansible.builtin.assert:
        that:
          - v.alertmanager.alertmanagerSpec.storage is defined
          - v.alertmanager.alertmanagerSpec.storage.volumeClaimTemplate.spec.resources.requests.storage == '2Gi'
        fail_msg: "Alertmanager PVC is misconfigured"

    # ── Node Exporter & kube-state-metrics ──────────────────────────────────────
    - name: Assert nodeExporter enabled
      ansible.builtin.assert:
        that: v.nodeExporter.enabled == true
        fail_msg: "nodeExporter.enabled must be true"

    - name: Summary
      ansible.builtin.debug:
        msg: "All prometheus-stack checks passed"
100
addons/prometheus-stack/role/tasks/main.yml
Normal file
@@ -0,0 +1,100 @@
---
- name: Add prometheus-community Helm repo
  kubernetes.core.helm_repository:
    name: prometheus-community
    repo_url: "{{ prometheus_stack_chart_repo }}"
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Update Helm repos
  ansible.builtin.command: helm repo update
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  changed_when: false
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Create monitoring namespace
  # The pipe requires the shell module; ansible.builtin.command would not interpret it
  ansible.builtin.shell: >
    k3s kubectl create namespace {{ prometheus_stack_namespace }}
    --dry-run=client -o yaml | k3s kubectl apply -f -
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  changed_when: false

- name: Template kube-prometheus-stack values
  ansible.builtin.template:
    src: prometheus-stack-values.yaml.j2
    dest: /tmp/prometheus-stack-values.yaml
    mode: '0644'
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true

- name: Deploy kube-prometheus-stack via Helm
  kubernetes.core.helm:
    name: "{{ prometheus_stack_release_name }}"
    chart_ref: "prometheus-community/{{ prometheus_stack_chart_name }}"
    chart_version: "{{ prometheus_stack_version }}"
    release_namespace: "{{ prometheus_stack_namespace }}"
    create_namespace: true
    wait: true
    timeout: "10m0s"
    values_files:
      - /tmp/prometheus-stack-values.yaml
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Wait for Grafana to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ prometheus_stack_namespace }}
    rollout status deployment/{{ prometheus_stack_release_name }}-grafana
    --timeout=180s
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: grafana_ready
  changed_when: false
  retries: 3
  delay: 15
  until: grafana_ready.rc == 0

- name: Get Grafana admin password (from secret)
  ansible.builtin.command: >
    k3s kubectl -n {{ prometheus_stack_namespace }}
    get secret {{ prometheus_stack_release_name }}-grafana
    -o jsonpath="{.data.admin-password}"
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: grafana_secret
  changed_when: false

- name: Show Grafana access info
  ansible.builtin.debug:
    msg:
      - "Grafana URL: http://{{ hostvars[groups['k3s_master'][0]]['ansible_host'] }}:32000 (NodePort) or via Ingress"
      - "Admin user: admin"
      - "Admin password: {{ grafana_secret.stdout | b64decode }}"
  run_once: true

- name: Show monitoring namespace pods
  ansible.builtin.command: >
    k3s kubectl -n {{ prometheus_stack_namespace }} get pods
  become: true
  delegate_to: "{{ groups['k3s_master'][0] }}"
  run_once: true
  register: prom_pods
  changed_when: false

- name: Monitoring pods
  ansible.builtin.debug:
    msg: "{{ prom_pods.stdout_lines }}"
  run_once: true
119
addons/prometheus-stack/role/templates/prometheus-stack-values.yaml.j2
Normal file
@@ -0,0 +1,119 @@
## kube-prometheus-stack Helm values
## Managed by Ansible (roles/prometheus-stack)

grafana:
  enabled: {{ prometheus_grafana_enabled | lower }}
  adminUser: "{{ grafana_admin_user }}"
  adminPassword: "{{ prometheus_grafana_admin_password }}"

  resources:
    requests:
      cpu: "{{ grafana_resources.requests.cpu }}"
      memory: "{{ grafana_resources.requests.memory }}"
    limits:
      cpu: "{{ grafana_resources.limits.cpu }}"
      memory: "{{ grafana_resources.limits.memory }}"

  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"

{% if prometheus_grafana_ingress_enabled %}
  ingress:
    enabled: true
    ingressClassName: "{{ prometheus_grafana_ingress_class }}"
    hosts:
      - "{{ prometheus_grafana_ingress_host }}"
    paths:
      - /
{% else %}
  service:
    type: NodePort
    nodePort: 32000
{% endif %}

  # Bundled dashboards
  defaultDashboardsEnabled: true
  defaultDashboardsTimezone: utc

  persistence:
    enabled: {{ grafana_storage_enabled | lower }}
    type: pvc
    accessModes:
      - ReadWriteOnce
    size: "{{ grafana_storage_size }}"
{% if grafana_storage_class %}
    storageClassName: "{{ grafana_storage_class }}"
{% endif %}

prometheus:
  prometheusSpec:
    retention: "{{ prometheus_retention_days }}d"

    resources:
      requests:
        cpu: "{{ prometheus_resources.requests.cpu }}"
        memory: "{{ prometheus_resources.requests.memory }}"
      limits:
        cpu: "{{ prometheus_resources.limits.cpu }}"
        memory: "{{ prometheus_resources.limits.memory }}"

    tolerations:
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"

    storageSpec:
      volumeClaimTemplate:
        spec:
{% if prometheus_storage_class %}
          storageClassName: "{{ prometheus_storage_class }}"
{% endif %}
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: "{{ prometheus_storage_size }}"

    # Scrape metrics from all namespaces
    serviceMonitorSelectorNilUsesHelmValues: false
    podMonitorSelectorNilUsesHelmValues: false
    ruleSelectorNilUsesHelmValues: false

alertmanager:
  enabled: {{ prometheus_alertmanager_enabled | lower }}
  alertmanagerSpec:
    tolerations:
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"
    storage:
      volumeClaimTemplate:
        spec:
{% if prometheus_storage_class %}
          storageClassName: "{{ prometheus_storage_class }}"
{% endif %}
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: "{{ prometheus_alertmanager_storage_size }}"

nodeExporter:
  enabled: {{ prometheus_node_exporter_enabled | lower }}
  # DaemonSet: runs on all nodes, including masters
  tolerations:
    - operator: "Exists"

kube-state-metrics:
  enabled: {{ prometheus_kube_state_metrics_enabled | lower }}
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"

# Scheduling for the Prometheus Operator itself
prometheusOperator:
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"
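The three ...NilUsesHelmValues: false flags above make Prometheus discover monitors in every namespace, not only those installed by this release. A minimal sketch of a ServiceMonitor it would then pick up (application name, namespace, and port are illustrative):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: my-app
  namespace: my-app
spec:
  selector:
    matchLabels:
      app: my-app  # must match the labels on the target Service
  endpoints:
    - port: http-metrics  # named port on the Service
      interval: 30s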