- Added --validate=false to disable validation when installing addons
- kubectl now connects directly to the control-plane node
- The k8s-controller container is automatically attached to the kind network
- All kubectl commands use --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify
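In code terms, the points above boil down to a single kubectl prefix that every command in the script shares. A minimal sketch, with an illustrative cluster name and a placeholder manifest URL (the real invocations appear in the addon section of the script below):

    # Sketch of the kubectl prefix the script builds per cluster; "lab" and the URL are illustrative.
    name = "lab"
    kubectl = f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify"
    # Manifests are applied with client-side validation disabled, as described above.
    print(f"{kubectl} apply --validate=false -f https://example.com/manifest.yaml")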
#!/usr/bin/env python3
"""
Script for creating Kind clusters
Author: Сергей Антропов
Site: https://devops.org.ru
"""
import sys
import yaml
import subprocess
import os


def run_cmd(cmd):
    """Run a shell command, echoing it and exiting on failure."""
    print(f"[run] {cmd}")
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        print(f"[error] {result.stderr}")
        sys.exit(1)
    print(result.stdout)
    return result.stdout


def main():
    if len(sys.argv) < 3:
        print("Usage: create_k8s_cluster.py <preset_file> <container_name>")
        sys.exit(1)

    preset_file = sys.argv[1]
    container_name = sys.argv[2]

    print(f"📋 Reading preset: {preset_file}")
    with open(preset_file, 'r') as f:
        preset = yaml.safe_load(f)

    # Create the Docker network if it does not exist yet
    docker_network = preset.get('docker_network', 'labnet')
    print(f"\n🌐 Checking Docker network: {docker_network}")
    result = subprocess.run(f"docker network ls --format '{{{{.Name}}}}' | grep -x {docker_network}",
                            shell=True, capture_output=True, text=True)
    if not result.stdout.strip():
        print(f"📡 Creating Docker network: {docker_network}")
        run_cmd(f"docker network create {docker_network}")
    else:
        print(f"✅ Network {docker_network} already exists")

    # Host configuration from the preset
    hosts = preset.get('hosts', [])
    images = preset.get('images', {})
    systemd_defaults = preset.get('systemd_defaults', {})

    # Create containers if any hosts are defined
    if hosts:
        print(f"\n🐳 Creating containers (total: {len(hosts)})")
        for host in hosts:
            host_name = host['name']
            family = host['family']

            # Remove a stale container with the same name, if present
            result = subprocess.run(f"docker ps -a --format '{{{{.Names}}}}' | grep -x {host_name}",
                                    shell=True, capture_output=True, text=True)
            if result.stdout.strip():
                print(f"⚠️ Container '{host_name}' already exists, removing the old one")
                run_cmd(f"docker rm -f {host_name}")

            # Pick the image for this host family
            image = images.get(family, f"inecs/ansible-lab:{family}-latest")

            # Build the docker run command
            cmd_parts = [
                "docker run -d",
                f"--name {host_name}",
                f"--network {docker_network}",
                "--restart=unless-stopped"
            ]

            # Apply systemd-related defaults
            if systemd_defaults.get('privileged'):
                cmd_parts.append("--privileged")

            for vol in systemd_defaults.get('volumes', []):
                cmd_parts.append(f"-v {vol}")

            for tmpfs in systemd_defaults.get('tmpfs', []):
                cmd_parts.append(f"--tmpfs {tmpfs}")

            if systemd_defaults.get('capabilities'):
                for cap in systemd_defaults['capabilities']:
                    cmd_parts.append(f"--cap-add {cap}")

            cmd_parts.append(image)

            # Append the container command last, if one is set
            if systemd_defaults.get('command'):
                cmd_parts.append(systemd_defaults['command'])

            cmd = " ".join(cmd_parts)
            print(f"🚀 Creating container: {host_name}")
            run_cmd(cmd)
            print(f"✅ Container '{host_name}' created")

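    # For reference, a command the loop above can produce looks roughly like this
    # (emitted as a single line in practice; all values are illustrative and come from the preset):
    #   docker run -d --name node01 --network labnet --restart=unless-stopped --privileged
    #   -v /sys/fs/cgroup:/sys/fs/cgroup:rw --tmpfs /run --cap-add SYS_ADMIN
    #   inecs/ansible-lab:ubuntu-latest /sbin/init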
    kind_clusters = preset.get('kind_clusters', [])
    if not kind_clusters:
        print("\n⚠️ No kind clusters are defined in the preset")
        print("✅ Container creation finished")
        sys.exit(0)

    os.makedirs("/ansible/.kind", exist_ok=True)

    for cluster in kind_clusters:
        name = cluster['name']
        config_file = f"/ansible/.kind/{name}.yaml"

        print(f"\n☸️ Creating configuration for cluster: {name}")

        # Build the Kind configuration
        config = {
            'kind': 'Cluster',
            'apiVersion': 'kind.x-k8s.io/v1alpha4',
            'nodes': [
                {'role': 'control-plane'}
            ],
            'networking': {
                'apiServerAddress': '0.0.0.0',
                'apiServerPort': cluster.get('api_port', 0)
            }
        }

        # Add extraPortMappings for ingress if requested
        if cluster.get('addons', {}).get('ingress_nginx'):
            config['nodes'][0]['extraPortMappings'] = [
                {
                    'containerPort': 80,
                    'hostPort': cluster.get('ingress_host_http_port', 8081),
                    'protocol': 'TCP'
                },
                {
                    'containerPort': 443,
                    'hostPort': cluster.get('ingress_host_https_port', 8443),
                    'protocol': 'TCP'
                }
            ]

        # Add worker nodes
        workers = cluster.get('workers', 0)
        for _ in range(workers):
            config['nodes'].append({'role': 'worker'})

        # Write the configuration
        with open(config_file, 'w') as f:
            yaml.dump(config, f)

        print(f"✅ Configuration saved: {config_file}")

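        # The written file is plain Kind YAML; with one worker and no ingress mappings it
        # looks roughly like this (yaml.dump sorts the keys):
        #   apiVersion: kind.x-k8s.io/v1alpha4
        #   kind: Cluster
        #   networking:
        #     apiServerAddress: 0.0.0.0
        #     apiServerPort: 0
        #   nodes:
        #   - role: control-plane
        #   - role: worker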
        # Skip creation if the cluster already exists
        result = subprocess.run("kind get clusters", shell=True, capture_output=True, text=True)
        existing = result.stdout.strip().split('\n') if result.returncode == 0 else []

        if name in existing:
            print(f"⚠️ Cluster '{name}' already exists, skipping")
        else:
            print(f"🚀 Creating cluster: {name}")
            run_cmd(f"kind create cluster --name {name} --config {config_file}")

        # Attach the controller container to the kind network
        print("🔗 Connecting the container to the kind network...")
        result = subprocess.run("docker network inspect kind", shell=True, capture_output=True, text=True)
        if result.returncode == 0:
            # The controller container is the one passed as the second CLI argument
            controller_name = container_name
            result = subprocess.run(f"docker network connect kind {controller_name}", shell=True, capture_output=True, text=True)
            if result.returncode == 0:
                print(f"✅ Container {controller_name} connected to the kind network")
            else:
                print(f"⚠️ Could not connect the container to the kind network: {result.stderr}")
        else:
            print("⚠️ Kind network not found")

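        # Addons are installed by pointing kubectl straight at the control-plane node over
        # the kind network joined above: every call passes
        # --server=https://<name>-control-plane:6443 with --insecure-skip-tls-verify, and
        # manifests are applied with --validate=false to skip client-side validation.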
        # Install addons
        addons = cluster.get('addons', {})
        if not addons:
            continue

        print(f"\n📦 Installing addons for cluster: {name}")

        if addons.get('ingress_nginx'):
            print(" - Installing ingress-nginx")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify apply --validate=false -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s")

        if addons.get('metrics_server'):
            print(" - Installing metrics-server")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify apply --validate=false -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml")
            patch_json = '{"spec":{"template":{"spec":{"containers":[{"name":"metrics-server","args":["--kubelet-insecure-tls","--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]}]}}}}'
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify -n kube-system patch deploy metrics-server -p '{patch_json}'")

        if addons.get('istio'):
            print(" - Installing Istio")
            run_cmd(f"istioctl install -y --set profile=demo --context kind-{name}")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify -n istio-system rollout status deploy/istiod --timeout=180s")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s")

        if addons.get('kiali'):
            print(" - Installing Kiali")
            # NOTE: "create ns" errors if istio-system already exists (run_cmd then aborts)
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify create ns istio-system")
            run_cmd(f"helm upgrade --install kiali-server kiali/kiali-server --namespace istio-system --kube-context kind-{name} --set auth.strategy=anonymous --wait --timeout 180s")

        if addons.get('prometheus_stack'):
            print(" - Installing Prometheus Stack")
            run_cmd("helm repo add prometheus-community https://prometheus-community.github.io/helm-charts")
            run_cmd("helm repo update")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify create ns monitoring")
            run_cmd(f"helm upgrade --install monitoring prometheus-community/kube-prometheus-stack --namespace monitoring --kube-context kind-{name} --set grafana.adminPassword=admin --set grafana.defaultDashboardsTimezone=browser --wait --timeout 600s")
            run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify -n monitoring rollout status deploy/monitoring-grafana --timeout=300s")

        # Expose addons via NodePort
        addon_ports = cluster.get('addon_ports', {})
        if addon_ports:
            print("\n🔌 Configuring NodePort for addons")

            if 'prometheus' in addon_ports:
                port = addon_ports['prometheus']
                print(f" - Prometheus: {port}")
                patch_json = f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
                run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='{patch_json}'")

            if 'grafana' in addon_ports:
                port = addon_ports['grafana']
                print(f" - Grafana: {port}")
                patch_json = f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
                run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify patch svc -n monitoring monitoring-grafana --type='json' -p='{patch_json}'")

            if 'kiali' in addon_ports:
                port = addon_ports['kiali']
                print(f" - Kiali: {port}")
                patch_json = f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
                run_cmd(f"kubectl --server=https://{name}-control-plane:6443 --insecure-skip-tls-verify patch svc -n istio-system kiali --type='json' -p='{patch_json}'")

        print(f"✅ Cluster '{name}' ready!")

    print("\n🎉 All clusters created!")


if __name__ == '__main__':
    main()
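For reference, this is the shape of preset the script expects, written out as the Python structure that yaml.safe_load() returns. It is a sketch inferred from the keys accessed above; every value is illustrative, and a given run only needs the keys it actually uses:

    # Illustrative preset structure; keys mirror the lookups in create_k8s_cluster.py,
    # all values are examples only.
    example_preset = {
        'docker_network': 'labnet',
        'images': {'ubuntu': 'inecs/ansible-lab:ubuntu-latest'},   # per-family image override
        'systemd_defaults': {
            'privileged': True,
            'volumes': ['/sys/fs/cgroup:/sys/fs/cgroup:rw'],
            'tmpfs': ['/run', '/run/lock'],
            'capabilities': ['SYS_ADMIN'],
            'command': '/sbin/init',
        },
        'hosts': [
            {'name': 'node01', 'family': 'ubuntu'},
        ],
        'kind_clusters': [
            {
                'name': 'lab',
                'api_port': 0,                     # 0 is the script's default
                'workers': 2,
                'ingress_host_http_port': 8081,
                'ingress_host_https_port': 8443,
                'addons': {
                    'ingress_nginx': True,
                    'metrics_server': True,
                    'istio': False,
                    'kiali': False,
                    'prometheus_stack': True,
                },
                'addon_ports': {'prometheus': 30090, 'grafana': 30300},
            },
        ],
    }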