- YAML inventory with multi-groups in create.yml (see the preset sketch below)
- Vault preflight checks in converge.yml (encrypt/decrypt)
- Pre_tasks with include_vars for the lab preset
- Chaos Engineering playbook for fault-tolerance testing
- Idempotence checks in verify.yml
- Health Dashboard with a JSON report
- Secrets Inspector script for security checks
- Common tools installation in site.yml

New commands:
- make chaos - run the Chaos Engineering tests
- make check-secrets - check the secrets for security issues
- make idempotence - run the idempotence check

Updated files:
- molecule/universal/create.yml: added YAML inventory generation
- molecule/universal/molecule.yml: updated to use the YAML inventory
- molecule/universal/converge.yml: added vault preflight checks
- molecule/universal/verify.yml: added idempotence and health dashboard checks
- files/playbooks/chaos.yml: new Chaos Engineering playbook
- files/playbooks/site.yml: added common tools
- scripts/secret_scan.sh: new Secrets Inspector
- Makefile: added the new commands
- README.md: updated documentation

Benefits:
- Multi-groups in the YAML inventory for complex configurations
- Automatic validation and normalization of vault files
- Fault-tolerance testing via Chaos Engineering
- Idempotence checks for role quality
- Health Dashboard for monitoring the lab state
- Secrets Inspector for security
- Common tools installed on all hosts

Author: Sergey Antropov
Site: https://devops.org.ru
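
For orientation, here is a minimal sketch of a lab preset (the vars.yml that create.yml loads); every name, image, port, and path below is hypothetical and only illustrates the shape of the data the playbook expects, so the presets shipped in the repo may differ:

  docker_network: universal-lab
  generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"
  images:
    debian: "geerlingguy/docker-debian12-ansible:latest"
    rocky: "geerlingguy/docker-rockylinux9-ansible:latest"
  systemd_defaults:
    privileged: true
    command: /sbin/init
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:rw
    tmpfs:
      - /run
      - /tmp
    capabilities:
      - SYS_ADMIN
  hosts:
    - { name: web1, family: debian, groups: [web, prod] }    # multi-group host
    - { name: db1,  family: rocky,  group: db }              # single-group host
    - { name: ci1,  type: dind, publish: ["2375:2375"] }     # Docker-in-Docker node
  kind_clusters:
    - name: lab
      workers: 1
      addons:
        ingress_nginx: true
        metrics_server: true
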
molecule/universal/create.yml:

---
# Create the universal lab infrastructure
# Author: Sergey Antropov
# Site: https://devops.org.ru

- hosts: localhost
  gather_facts: false
  vars_files:
    - vars.yml
  tasks:
    - name: Ensure network exists
      community.docker.docker_network:
        name: "{{ docker_network }}"
        state: present

    - name: Pull systemd images
      community.docker.docker_image:
        name: "{{ images[item.family] }}"
        source: pull
      loop: "{{ hosts | selectattr('type','undefined') | list }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Start systemd nodes
      community.docker.docker_container:
        name: "{{ item.name }}"
        image: "{{ images[item.family] }}"
        networks:
          - name: "{{ docker_network }}"
        privileged: "{{ systemd_defaults.privileged }}"
        command: "{{ systemd_defaults.command }}"
        volumes: "{{ systemd_defaults.volumes }}"
        tmpfs: "{{ systemd_defaults.tmpfs }}"
        capabilities: "{{ systemd_defaults.capabilities }}"
        published_ports: "{{ item.publish | default([]) }}"
        state: started
        restart_policy: unless-stopped
      loop: "{{ hosts | selectattr('type','undefined') | list }}"
      loop_control:
        label: "{{ item.name }}"

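    # DinD nodes run their own Docker daemon inside the container (state kept in the
    # per-node <name>-docker volume); DOoD nodes below reuse the host daemon through
    # the mounted /var/run/docker.sock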
    - name: Start DinD nodes
      community.docker.docker_container:
        name: "{{ item.name }}"
        image: "docker:27-dind"
        privileged: true
        environment:
          DOCKER_TLS_CERTDIR: ""
        networks:
          - name: "{{ docker_network }}"
        published_ports: "{{ item.publish | default([]) }}"
        volumes:
          - "{{ item.name }}-docker:/var/lib/docker"
        state: started
        restart_policy: unless-stopped
      loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Start DOoD nodes
      community.docker.docker_container:
        name: "{{ item.name }}"
        image: "{{ images[item.family] }}"
        networks:
          - name: "{{ docker_network }}"
        privileged: "{{ systemd_defaults.privileged }}"
        command: "{{ systemd_defaults.command }}"
        # append the host Docker socket to the default systemd volumes
        volumes: "{{ (systemd_defaults.volumes | default([])) + ['/var/run/docker.sock:/var/run/docker.sock'] }}"
        tmpfs: "{{ systemd_defaults.tmpfs }}"
        capabilities: "{{ systemd_defaults.capabilities }}"
        published_ports: "{{ item.publish | default([]) }}"
        state: started
        restart_policy: unless-stopped
      loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list }}"
      loop_control:
        label: "{{ item.name }}"

    # ---------- Build multi-group map ----------
    - name: Init groups map
      set_fact:
        groups_map: {}

    - name: Append hosts to groups
      set_fact:
        groups_map: >-
          {{
            groups_map | combine(
              { item_group: (groups_map[item_group] | default([])) + [item_name] }
            )
          }}
      loop: "{{ (hosts | default([])) | subelements('groups', skip_missing=True) }}"
      loop_control:
        label: "{{ item.0.name }}"
      vars:
        item_name: "{{ item.0.name }}"
        item_group: "{{ item.1 }}"
      when: item.0.groups is defined

    - name: Append hosts to single group
      set_fact:
        groups_map: >-
          {{
            groups_map | combine(
              { item.group: (groups_map[item.group] | default([])) + [item.name] }
            )
          }}
      loop: "{{ hosts | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      when: item.group is defined and item.groups is not defined

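    # With the hypothetical hosts from the preset sketch above
    # (web1 in groups [web, prod], db1 in group db), the two tasks
    # above leave groups_map as:
    #   {web: [web1], prod: [web1], db: [db1]}
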
    # ---------- INI inventory ----------
    - name: Render inventory.ini
      set_fact:
        inv_ini: |
          [all:vars]
          ansible_connection=community.docker.docker
          ansible_python_interpreter=/usr/bin/python3

          {% for group, members in (groups_map | dictsort) %}
          [{{ group }}]
          {% for h in members %}{{ h }}
          {% endfor %}

          {% endfor %}
          [all]
          {% for h in (hosts | default([])) %}{{ h.name }}
          {% endfor %}

    - name: Write hosts.ini
      copy:
        dest: "{{ generated_inventory }}"
        content: "{{ inv_ini }}"
        mode: "0644"

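    # With the same hypothetical hosts, the rendered hosts.ini is roughly:
    #   [all:vars]
    #   ansible_connection=community.docker.docker
    #   ansible_python_interpreter=/usr/bin/python3
    #
    #   [db]
    #   db1
    #
    #   [prod]
    #   web1
    #
    #   [web]
    #   web1
    #
    #   [all]
    #   web1
    #   db1
    #   ci1
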
    # ---------- YAML inventory (primary, multi-groups) ----------
    - name: Build YAML inventory dict
      set_fact:
        inv_yaml_obj:
          all:
            vars:
              ansible_connection: community.docker.docker
              ansible_python_interpreter: /usr/bin/python3
            children: "{{ children_map | default({}) }}"

    - name: Build children map for YAML
      set_fact:
        children_map: >-
          {{
            children_map | default({}) | combine(
              { item_key: { 'hosts': dict((groups_map[item_key] | default([])) | zip_longest([], fillvalue={})) } },
              recursive=True
            )
          }}
      loop: "{{ groups_map.keys() | list }}"
      loop_control:
        label: "{{ item }}"
      vars:
        item_key: "{{ item }}"

    - name: Write hosts.yml
      copy:
        dest: "{{ molecule_ephemeral_directory }}/inventory/hosts.yml"
        content: "{{ inv_yaml_obj | combine({'all': {'children': children_map | default({}) }}, recursive=True) | to_nice_yaml(indent=2) }}"
        mode: "0644"
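
    # The generated hosts.yml for the same hypothetical hosts looks like:
    #   all:
    #     children:
    #       db:
    #         hosts:
    #           db1: {}
    #       prod:
    #         hosts:
    #           web1: {}
    #       web:
    #         hosts:
    #           web1: {}
    #     vars:
    #       ansible_connection: community.docker.docker
    #       ansible_python_interpreter: /usr/bin/python3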

    # ---------- Kind clusters (if defined) ----------
    - name: Create kind cluster configs
      community.docker.docker_container_exec:
        container: ansible-controller
        command: |
          bash -lc '
          mkdir -p /ansible/.kind;
          cat > /ansible/.kind/{{ item.name }}.yaml <<EOF
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          nodes:
            - role: control-plane
          {% if (item.addons | default({})).ingress_nginx | default(false) %}
              extraPortMappings:
                - containerPort: 80
                  hostPort: {{ item.ingress_host_http_port | default(8081) }}
                  protocol: TCP
                - containerPort: 443
                  hostPort: {{ item.ingress_host_https_port | default(8443) }}
                  protocol: TCP
          {% endif %}
          {% for i in range(item.workers | default(0)) %}
            - role: worker
          {% endfor %}
          networking:
            apiServerAddress: "0.0.0.0"
            apiServerPort: {{ item.api_port | default(0) }}
          EOF
          '
      loop: "{{ kind_clusters | default([]) }}"
      when: (kind_clusters | default([])) | length > 0

    - name: Create kind clusters
      community.docker.docker_container_exec:
        container: ansible-controller
        command: >
          bash -lc '
          set -e;
          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
            if kind get clusters | grep -qx "$n"; then
              echo "[kind] cluster $n already exists";
            else
              echo "[kind] creating $n";
              kind create cluster --name "$n" --config "/ansible/.kind/$n.yaml";
            fi
          done
          '
      when: (kind_clusters | default([])) | length > 0

    - name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
      community.docker.docker_container_exec:
        container: ansible-controller
        command: |
          bash -lc '
          set -e;
          helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
          helm repo update >/dev/null 2>&1 || true;
          # ingress-nginx
          if {{ (item.addons | default({})).ingress_nginx | default(false) | to_json }}; then
            echo "[addons] ingress-nginx on {{ item.name }}";
            kubectl --context kind-{{ item.name }} apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
            kubectl --context kind-{{ item.name }} -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
          fi
          # metrics-server
          if {{ (item.addons | default({})).metrics_server | default(false) | to_json }}; then
            echo "[addons] metrics-server on {{ item.name }}";
            kubectl --context kind-{{ item.name }} apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
            kubectl --context kind-{{ item.name }} -n kube-system patch deploy metrics-server -p \
              "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}" || true;
          fi
          # istio (demo profile)
          if {{ (item.addons | default({})).istio | default(false) | to_json }}; then
            echo "[addons] istio (demo profile) on {{ item.name }}";
            istioctl install -y --set profile=demo --context kind-{{ item.name }};
            kubectl --context kind-{{ item.name }} -n istio-system rollout status deploy/istiod --timeout=180s || true;
            kubectl --context kind-{{ item.name }} -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
          fi
          # kiali (server chart, anonymous auth); requires istio/metrics
          if {{ (item.addons | default({})).kiali | default(false) | to_json }}; then
            echo "[addons] kiali on {{ item.name }}";
            kubectl --context kind-{{ item.name }} create ns istio-system >/dev/null 2>&1 || true;
            helm upgrade --install kiali-server kiali/kiali-server \
              --namespace istio-system --kube-context kind-{{ item.name }} \
              --set auth.strategy=anonymous --wait --timeout 180s;
          fi
          # kube-prometheus-stack (Prometheus + Grafana)
          if {{ (item.addons | default({})).prometheus_stack | default(false) | to_json }}; then
            echo "[addons] kube-prometheus-stack on {{ item.name }}";
            kubectl --context kind-{{ item.name }} create ns monitoring >/dev/null 2>&1 || true;
            helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
              --namespace monitoring --kube-context kind-{{ item.name }} \
              --set grafana.adminPassword=admin \
              --set grafana.defaultDashboardsTimezone=browser \
              --wait --timeout 600s;
            # wait for Grafana
            kubectl --context kind-{{ item.name }} -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
          fi
          '
      loop: "{{ kind_clusters | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      when: (kind_clusters | default([])) | length > 0

    - name: Apply Istio Telemetry + mesh mTLS + Grafana dashboards (per cluster)
      community.docker.docker_container_exec:
        container: ansible-controller
        command: |
          bash -lc '
          set -e;
          # Telemetry/mTLS: only when istio is enabled
          if {{ (item.addons | default({})).istio | default(false) | to_json }}; then
            echo "[istio] applying Telemetry + PeerAuthentication on {{ item.name }}";
            kubectl --context kind-{{ item.name }} -n istio-system apply -f /ansible/files/k8s/istio/telemetry.yaml || true;
            kubectl --context kind-{{ item.name }} -n istio-system apply -f /ansible/files/k8s/istio/trafficpolicy.yaml --dry-run=client -o yaml >/dev/null 2>&1 || true;
            # the DestinationRule from trafficpolicy targets the bookinfo namespace; it is created later in verify, after the deploy
          fi

          # Grafana dashboards (ConfigMap with label grafana_dashboard=1)
          if {{ (item.addons | default({})).prometheus_stack | default(false) | to_json }}; then
            echo "[grafana] provisioning dashboards on {{ item.name }}";
            kubectl --context kind-{{ item.name }} -n monitoring create configmap dashboard-istio-overview \
              --from-file=dashboard.json=/ansible/files/grafana/dashboards/istio-overview.json \
              --dry-run=client -o yaml | kubectl --context kind-{{ item.name }} apply -f -;
            kubectl --context kind-{{ item.name }} -n monitoring label configmap dashboard-istio-overview grafana_dashboard=1 --overwrite;

            kubectl --context kind-{{ item.name }} -n monitoring create configmap dashboard-service-sli \
              --from-file=dashboard.json=/ansible/files/grafana/dashboards/service-sli.json \
              --dry-run=client -o yaml | kubectl --context kind-{{ item.name }} apply -f -;
            kubectl --context kind-{{ item.name }} -n monitoring label configmap dashboard-service-sli grafana_dashboard=1 --overwrite;
          fi
          '
      loop: "{{ kind_clusters | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      when: (kind_clusters | default([])) | length > 0