feat: добавить поддержку Kubernetes Kind кластеров
Some checks failed
Ansible Testing / lint (push) Has been cancelled
Ansible Testing / test (default) (push) Has been cancelled
Ansible Testing / test (minimal) (push) Has been cancelled
Ansible Testing / test (performance) (push) Has been cancelled
Ansible Testing / deploy-check (push) Has been cancelled

- Создан новый Docker образ k8s для работы с Kind, kubectl, Helm, Istio CLI
- Добавлены команды make k8s: create, destroy, stop, start, status, config, nodes, addon, shell
- Добавлена поддержка пресетов Kubernetes в molecule/presets/k8s/
- Создан скрипт create_k8s_cluster.py для автоматического создания кластеров и установки аддонов
- Добавлена документация docs/kubernetes-kind.md
- Команды kubectl выполняются внутри контейнера k8s, не требуют локальной установки
This commit is contained in:
Сергей Антропов
2025-10-26 03:30:58 +03:00
parent c1655d2674
commit 881502ad69
12 changed files with 1487 additions and 5 deletions

161
scripts/create_k8s_cluster.py Executable file
View File

@@ -0,0 +1,161 @@
#!/usr/bin/env python3
"""
Скрипт для создания Kind кластеров
Автор: Сергей Антропов
Сайт: https://devops.org.ru
"""
import sys
import yaml
import subprocess
import os
def run_cmd(cmd):
    """Run *cmd* through the shell and return its captured stdout.

    The command line is echoed before execution and its stdout is echoed
    after.  On a non-zero exit status the captured stderr is reported and
    the whole script aborts with exit code 1 (fail-fast: every command in
    this script is a mandatory provisioning step).

    Args:
        cmd: full shell command line (shell=True is required because the
             kubectl/helm invocations rely on shell quoting of JSON patches).

    Returns:
        The command's stdout as text.
    """
    print(f"[run] {cmd}")
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        # Diagnostics belong on stderr so they survive stdout redirection
        # (the original printed them to stdout).
        print(f"[error] {result.stderr}", file=sys.stderr)
        sys.exit(1)
    print(result.stdout)
    return result.stdout
def _build_kind_config(cluster):
    """Build a Kind (v1alpha4) cluster config dict from one preset entry.

    Honored preset keys: ``api_port`` (0 = let Kind pick a free host port),
    ``workers`` (number of worker nodes), ``addons.ingress_nginx`` plus
    ``ingress_host_http_port`` / ``ingress_host_https_port`` (host port
    mappings for the ingress controller on the control-plane node).
    """
    config = {
        'kind': 'Cluster',
        'apiVersion': 'kind.x-k8s.io/v1alpha4',
        'nodes': [
            {'role': 'control-plane'}
        ],
        'networking': {
            'apiServerAddress': '0.0.0.0',
            'apiServerPort': cluster.get('api_port', 0)
        }
    }
    # Map host ports onto the control-plane node so the ingress controller
    # is reachable from outside the Kind container.
    if cluster.get('addons', {}).get('ingress_nginx'):
        config['nodes'][0]['extraPortMappings'] = [
            {
                'containerPort': 80,
                'hostPort': cluster.get('ingress_host_http_port', 8081),
                'protocol': 'TCP'
            },
            {
                'containerPort': 443,
                'hostPort': cluster.get('ingress_host_https_port', 8443),
                'protocol': 'TCP'
            }
        ]
    for _ in range(cluster.get('workers', 0)):
        config['nodes'].append({'role': 'worker'})
    return config


def _ensure_namespace(name, namespace):
    """Create *namespace* idempotently in cluster *name*.

    ``kubectl create ns`` fails when the namespace already exists (e.g.
    ``istio-system`` after ``istioctl install``), which would abort the
    whole script via run_cmd.  The dry-run|apply pipeline succeeds either way.
    """
    run_cmd(
        f"kubectl --context kind-{name} create ns {namespace} "
        f"--dry-run=client -o yaml | kubectl --context kind-{name} apply -f -"
    )


def _patch_nodeport(name, namespace, service, port):
    """Switch *service* to type NodePort and pin its first port to *port*."""
    patch_json = (
        f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},'
        f'{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
    )
    run_cmd(
        f"kubectl --context kind-{name} patch svc -n {namespace} {service} "
        f"--type='json' -p='{patch_json}'"
    )


def _install_addons(name, addons):
    """Install the enabled addons into cluster *name* (context kind-<name>).

    Supported flags: ingress_nginx, metrics_server, istio, kiali,
    prometheus_stack.  Each install waits for its rollout so later steps
    (NodePort patches) see the services in place.
    """
    print(f"\n📦 Установка аддонов для кластера: {name}")
    kubectl = f"kubectl --context kind-{name}"
    if addons.get('ingress_nginx'):
        print(" - Installing ingress-nginx")
        run_cmd(f"{kubectl} apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml")
        run_cmd(f"{kubectl} -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s")
    if addons.get('metrics_server'):
        print(" - Installing metrics-server")
        run_cmd(f"{kubectl} apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml")
        # Kind nodes have self-signed kubelet certs; relax TLS checks so
        # metrics-server can actually scrape them.
        patch_json = '{"spec":{"template":{"spec":{"containers":[{"name":"metrics-server","args":["--kubelet-insecure-tls","--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]}]}}}}'
        run_cmd(f"{kubectl} -n kube-system patch deploy metrics-server -p '{patch_json}'")
    if addons.get('istio'):
        print(" - Installing Istio")
        run_cmd(f"istioctl install -y --set profile=demo --context kind-{name}")
        run_cmd(f"{kubectl} -n istio-system rollout status deploy/istiod --timeout=180s")
        run_cmd(f"{kubectl} -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s")
    if addons.get('kiali'):
        print(" - Installing Kiali")
        # istio-system may already exist (created by istioctl above) —
        # create it idempotently instead of failing on AlreadyExists.
        _ensure_namespace(name, 'istio-system')
        run_cmd(f"helm upgrade --install kiali-server kiali/kiali-server --namespace istio-system --kube-context kind-{name} --set auth.strategy=anonymous --wait --timeout 180s")
    if addons.get('prometheus_stack'):
        print(" - Installing Prometheus Stack")
        run_cmd("helm repo add prometheus-community https://prometheus-community.github.io/helm-charts")
        run_cmd("helm repo update")
        _ensure_namespace(name, 'monitoring')
        run_cmd(f"helm upgrade --install monitoring prometheus-community/kube-prometheus-stack --namespace monitoring --kube-context kind-{name} --set grafana.adminPassword=admin --set grafana.defaultDashboardsTimezone=browser --wait --timeout 600s")
        run_cmd(f"{kubectl} -n monitoring rollout status deploy/monitoring-grafana --timeout=300s")


# addon_ports key -> (namespace, service name, display label)
_ADDON_SERVICES = {
    'prometheus': ('monitoring', 'monitoring-kube-prom-prometheus', 'Prometheus'),
    'grafana': ('monitoring', 'monitoring-grafana', 'Grafana'),
    'kiali': ('istio-system', 'kiali', 'Kiali'),
}


def _configure_nodeports(name, addon_ports):
    """Expose the configured addon services on fixed NodePorts."""
    print("\n🔌 Настройка NodePort для аддонов")
    for key, (namespace, service, label) in _ADDON_SERVICES.items():
        if key in addon_ports:
            port = addon_ports[key]
            print(f" - {label}: {port}")
            _patch_nodeport(name, namespace, service, port)


def main():
    """Read a preset YAML and create/configure each Kind cluster it lists.

    Usage: create_k8s_cluster.py <preset_file> <container_name>

    Exits 1 on bad arguments or any failed provisioning command; exits 0
    when the preset defines no kind clusters.
    """
    if len(sys.argv) < 3:
        print("Usage: create_k8s_cluster.py <preset_file> <container_name>")
        sys.exit(1)
    preset_file = sys.argv[1]
    # Accepted for CLI compatibility; not used by this script itself.
    container_name = sys.argv[2]
    print(f"📋 Читаю пресет: {preset_file}")
    with open(preset_file, 'r') as f:
        preset = yaml.safe_load(f)
    kind_clusters = preset.get('kind_clusters', [])
    if not kind_clusters:
        print("⚠️ В пресете не определены kind кластеры")
        sys.exit(0)
    os.makedirs("/ansible/.kind", exist_ok=True)
    for cluster in kind_clusters:
        name = cluster['name']
        config_file = f"/ansible/.kind/{name}.yaml"
        print(f"\n☸️ Создание конфигурации для кластера: {name}")
        with open(config_file, 'w') as f:
            yaml.dump(_build_kind_config(cluster), f)
        print(f"✅ Конфигурация сохранена: {config_file}")
        # Skip creation when a cluster with this name already exists.
        result = subprocess.run("kind get clusters", shell=True, capture_output=True, text=True)
        existing = result.stdout.strip().split('\n') if result.returncode == 0 else []
        if name in existing:
            print(f"⚠️ Кластер '{name}' уже существует, пропускаю")
        else:
            print(f"🚀 Создание кластера: {name}")
            run_cmd(f"kind create cluster --name {name} --config {config_file}")
        addons = cluster.get('addons', {})
        if not addons:
            # No addons: nothing more to do for this cluster.
            continue
        _install_addons(name, addons)
        addon_ports = cluster.get('addon_ports', {})
        if addon_ports:
            _configure_nodeports(name, addon_ports)
        print(f"✅ Кластер '{name}' готов!")
    print("\n🎉 Все кластеры созданы!")
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()