refactor: remove redundant Kind tasks from create.yml and destroy.yml
Some checks failed
Ansible Testing / lint (push) Has been cancelled
Ansible Testing / test (default) (push) Has been cancelled
Ansible Testing / test (minimal) (push) Has been cancelled
Ansible Testing / test (performance) (push) Has been cancelled
Ansible Testing / deploy-check (push) Has been cancelled
- Removed all Kind cluster creation tasks from create.yml
- Removed all Kind cluster deletion tasks from destroy.yml
- Added comments noting that all Kind operations go through make k8s
- Kind clusters are now managed entirely by the Python script create_k8s_cluster.py
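For orientation, a minimal sketch of the entry point that make k8s create / make k8s destroy could be calling. create_k8s_cluster.py itself is not part of this diff, so the subcommand names, flags, and defaults below are assumptions, not its actual interface:

#!/usr/bin/env python3
# Hypothetical sketch of create_k8s_cluster.py's CLI surface (illustrative only).
import argparse
import subprocess

def main() -> None:
    parser = argparse.ArgumentParser(description="Manage Kind clusters outside of Ansible")
    parser.add_argument("action", choices=["create", "destroy"],
                        help="invoked as 'make k8s create' or 'make k8s destroy'")
    parser.add_argument("--name", default="dev", help="Kind cluster name (assumed default)")
    args = parser.parse_args()
    verb = "create" if args.action == "create" else "delete"
    # kind is driven the same way the removed Ansible tasks drove it.
    subprocess.run(["kind", verb, "cluster", "--name", args.name], check=True)

if __name__ == "__main__":
    main()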
create.yml
@@ -285,133 +285,5 @@
           - DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
           - DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}

-    # ---------- Kind clusters (если определены) ----------
-    - name: Create kind cluster configs
-      community.docker.docker_container_exec:
-        container: ansible-controller
-        command: >
-          bash -lc '
-          mkdir -p /ansible/.kind;
-          cat > /ansible/.kind/{{ item.name }}.yaml <<EOF
-          kind: Cluster
-          apiVersion: kind.x-k8s.io/v1alpha4
-          nodes:
-            - role: control-plane
-              {% if (item.addons|default({})).ingress_nginx|default(false) %}
-              extraPortMappings:
-                - containerPort: 80
-                  hostPort: {{ item.ingress_host_http_port | default(8081) }}
-                  protocol: TCP
-                - containerPort: 443
-                  hostPort: {{ item.ingress_host_https_port | default(8443) }}
-                  protocol: TCP
-              {% endif %}
-          {% for i in range(item.workers | default(0)) %}
-            - role: worker
-          {% endfor %}
-          networking:
-            apiServerAddress: "0.0.0.0"
-            apiServerPort: {{ item.api_port | default(0) }}
-          EOF
-          '
-      loop: "{{ kind_clusters | default([]) }}"
-      when: (kind_clusters | default([])) | length > 0
-
-    - name: Create kind clusters
-      community.docker.docker_container_exec:
-        container: ansible-controller
-        command: >
-          bash -lc '
-          set -e;
-          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
-            if kind get clusters | grep -qx "$$n"; then
-              echo "[kind] cluster $$n already exists";
-            else
-              echo "[kind] creating $$n";
-              kind create cluster --name "$$n" --config "/ansible/.kind/$$n.yaml";
-            fi
-          done
-          '
-      when: (kind_clusters | default([])) | length > 0
-
-    - name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
-      community.docker.docker_container_exec:
-        container: ansible-controller
-        command: >
-          bash -lc '
-          set -e;
-          helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
-          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
-          helm repo update >/dev/null 2>&1 || true;
-          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
-            # ingress-nginx
-            if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("ingress_nginx", False) | to_json }}; then
-              echo "[addons] ingress-nginx on $$n";
-              kubectl --context kind-$$n apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
-              kubectl --context kind-$$n -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
-            fi
-            # metrics-server
-            if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("metrics_server", False) | to_json }}; then
-              echo "[addons] metrics-server on $$n";
-              kubectl --context kind-$$n apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
-              kubectl --context kind-$$n -n kube-system patch deploy metrics-server -p \
-                "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}}" || true;
-            fi
-            # istio (demo profile)
-            if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("istio", False) | to_json }}; then
-              echo "[addons] istio (demo profile) on $$n";
-              istioctl install -y --set profile=demo --context kind-$$n;
-              kubectl --context kind-$$n -n istio-system rollout status deploy/istiod --timeout=180s || true;
-              kubectl --context kind-$$n -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
-            fi
-            # kiali (server chart, anonymous auth) — требует istio/metrics
-            if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("kiali", False) | to_json }}; then
-              echo "[addons] kiali on $$n";
-              kubectl --context kind-$$n create ns istio-system >/dev/null 2>&1 || true;
-              helm upgrade --install kiali-server kiali/kiali-server \
-                --namespace istio-system --kube-context kind-$$n \
-                --set auth.strategy=anonymous --wait --timeout 180s;
-            fi
-            # kube-prometheus-stack (Prometheus + Grafana)
-            if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("prometheus_stack", False) | to_json }}; then
-              echo "[addons] kube-prometheus-stack on $$n";
-              kubectl --context kind-$$n create ns monitoring >/dev/null 2>&1 || true;
-              helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
-                --namespace monitoring --kube-context kind-$$n \
-                --set grafana.adminPassword=admin \
-                --set grafana.defaultDashboardsTimezone=browser \
-                --wait --timeout 600s;
-              # дождаться графаны
-              kubectl --context kind-$$n -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
-            fi
-          done
-          '
-      when: (kind_clusters | default([])) | length > 0
-
-    - name: Setup NodePort for addons
-      community.docker.docker_container_exec:
-        container: ansible-controller
-        command: >
-          bash -lc '
-          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
-            {% for cluster in kind_clusters | default([]) %}
-            {% if cluster.addon_ports is defined %}
-            if [ "$$n" = "{{ cluster.name }}" ]; then
-              {% if cluster.addon_ports.prometheus is defined %}
-              echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}";
-              kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.prometheus }}}]' 2>/dev/null || true;
              {% endif %}
-              {% if cluster.addon_ports.grafana is defined %}
-              echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}";
-              kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.grafana }}}]' 2>/dev/null || true;
-              {% endif %}
-              {% if cluster.addon_ports.kiali is defined %}
-              echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}";
-              kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.kiali }}}]' 2>/dev/null || true;
-              {% endif %}
-            fi
-            {% endif %}
-            {% endfor %}
-          done
-          '
-      when: (kind_clusters | default([])) | length > 0
+    # ---------- Kind clusters создаются через Python скрипт create_k8s_cluster.py ----------
+    # Все задачи по созданию кластеров и установке аддонов выполняются через make k8s create
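The removed "Create kind cluster configs" task rendered the Kind config through Jinja2 and a heredoc. A rough sketch of how the equivalent config could be built inside create_k8s_cluster.py; the function name and parameters are hypothetical, only the field layout is taken from the removed template:

import yaml  # PyYAML assumed available in the controller image

def render_kind_config(workers: int = 0, ingress_nginx: bool = False,
                       http_port: int = 8081, https_port: int = 8443,
                       api_port: int = 0) -> str:
    # Mirrors the kind.x-k8s.io/v1alpha4 document the heredoc used to emit.
    control_plane = {"role": "control-plane"}
    if ingress_nginx:
        # Same host port mappings the template added when addons.ingress_nginx was enabled.
        control_plane["extraPortMappings"] = [
            {"containerPort": 80, "hostPort": http_port, "protocol": "TCP"},
            {"containerPort": 443, "hostPort": https_port, "protocol": "TCP"},
        ]
    config = {
        "kind": "Cluster",
        "apiVersion": "kind.x-k8s.io/v1alpha4",
        "nodes": [control_plane] + [{"role": "worker"} for _ in range(workers)],
        "networking": {"apiServerAddress": "0.0.0.0", "apiServerPort": api_port},
    }
    return yaml.safe_dump(config, sort_keys=False)

Writing the result to a per-cluster file and running kind create cluster --name <name> --config <file> would reproduce what the removed "Create kind clusters" task did for each entry in kind_clusters.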
destroy.yml
@@ -76,21 +76,8 @@
         # Используем переменную hosts из загруженного пресета
         hosts: "{{ hosts }}"

-    - name: Remove kind clusters
-      community.docker.docker_container_exec:
-        container: ansible-controller
-        command: >
-          bash -lc '
-          set -e;
-          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
-            if kind get clusters | grep -qx "$$n"; then
-              echo "[kind] deleting $$n";
-              kind delete cluster --name "$$n" || true;
-            fi
-          done
-          '
-      when: (kind_clusters | default([])) | length > 0
-      ignore_errors: true
+    # ---------- Kind clusters удаляются через make k8s destroy ----------
+    # Все задачи по удалению кластеров выполняются через команду make k8s destroy

     - name: Display cleanup summary
       debug:
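For symmetry, the removed destroy.yml task boils down to an existence check followed by kind delete cluster. A sketch of the same idempotent deletion in Python; how create_k8s_cluster.py actually implements make k8s destroy is not shown in this commit:

import subprocess

def delete_cluster(name: str) -> None:
    # List existing clusters first, mirroring the removed 'kind get clusters | grep -qx' guard.
    existing = subprocess.run(["kind", "get", "clusters"],
                              capture_output=True, text=True, check=True)
    if name in existing.stdout.splitlines():
        print(f"[kind] deleting {name}")
        subprocess.run(["kind", "delete", "cluster", "--name", name], check=True)
    else:
        print(f"[kind] cluster {name} not found, nothing to delete")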