feat: добавить поддержку Kubernetes Kind кластеров
Some checks failed
Ansible Testing / lint (push) Has been cancelled
Ansible Testing / test (default) (push) Has been cancelled
Ansible Testing / test (minimal) (push) Has been cancelled
Ansible Testing / test (performance) (push) Has been cancelled
Ansible Testing / deploy-check (push) Has been cancelled
- Создан новый Docker образ k8s для работы с Kind, kubectl, Helm, Istio CLI
- Добавлены команды make k8s: create, destroy, stop, start, status, config, nodes, addon, shell
- Добавлена поддержка пресетов Kubernetes в molecule/presets/k8s/
- Создан скрипт create_k8s_cluster.py для автоматического создания кластеров и установки аддонов
- Добавлена документация docs/kubernetes-kind.md
- Команды kubectl выполняются внутри контейнера k8s, не требуют локальной установки
This commit is contained in:
@@ -4,7 +4,8 @@
|
||||
vars:
|
||||
# Получаем preset из переменной окружения или используем default
|
||||
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
|
||||
preset_file: "/workspace/molecule/presets/{{ preset_name }}.yml"
|
||||
# Проверяем сначала в папке k8s, затем в основной папке presets
|
||||
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"
|
||||
|
||||
# Fallback значения если preset файл не найден
|
||||
docker_network: labnet
|
||||
@@ -30,6 +31,7 @@
|
||||
- name: u1
|
||||
family: debian
|
||||
groups: [test]
|
||||
kind_clusters: []
|
||||
|
||||
tasks:
|
||||
# - name: Install required collections
|
||||
@@ -281,4 +283,135 @@
|
||||
- Groups: {{ groups_map.keys() | list | join(', ') }}
- Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
- DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
# One DOoD line only — the original repeated this line verbatim twice.
- DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}
||||
|
||||
# ---------- Kind clusters (if defined) ----------
# Renders one kind config per cluster entry and writes it inside the
# controller container. A literal block scalar (|) is REQUIRED here: the
# heredoc below depends on real newlines; the original folded scalar (>)
# collapsed the whole script — heredoc body included — onto a single line,
# producing both a bash syntax error and a broken kind config.
- name: Create kind cluster configs
  community.docker.docker_container_exec:
    container: ansible-controller
    command: |
      bash -lc '
      mkdir -p /ansible/.kind
      cat > /ansible/.kind/{{ item.name }}.yaml <<EOF
      kind: Cluster
      apiVersion: kind.x-k8s.io/v1alpha4
      nodes:
        - role: control-plane
      {% if (item.addons | default({})).ingress_nginx | default(false) %}
          extraPortMappings:
            - containerPort: 80
              hostPort: {{ item.ingress_host_http_port | default(8081) }}
              protocol: TCP
            - containerPort: 443
              hostPort: {{ item.ingress_host_https_port | default(8443) }}
              protocol: TCP
      {% endif %}
      {% for _ in range(item.workers | default(0)) %}
        - role: worker
      {% endfor %}
      networking:
        apiServerAddress: "0.0.0.0"
        apiServerPort: {{ item.api_port | default(0) }}
      EOF
      '
  loop: "{{ kind_clusters | default([]) }}"
  when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
# Creates each kind cluster that does not already exist.
# Two fixes vs. the original:
#   * $n, not $$n — "$$" is docker-compose escaping; Ansible passes the
#     string through unchanged, and in bash "$$" expands to the shell PID,
#     so "$$n" became "<pid>n".
#   * literal block (|) instead of folded (>) — folding joined "fi" and
#     "done" into "fi done", a bash syntax error.
- name: Create kind clusters
  community.docker.docker_container_exec:
    container: ansible-controller
    command: |
      bash -lc '
      set -e
      for n in {{ kind_clusters | default([]) | map(attribute="name") | map("quote") | join(" ") }}; do
        if kind get clusters | grep -qx "$n"; then
          echo "[kind] cluster $n already exists"
        else
          echo "[kind] creating $n"
          kind create cluster --name "$n" --config "/ansible/.kind/$n.yaml"
        fi
      done
      '
  when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
# Installs the enabled addons on every kind cluster.
# Rewritten from the original, which gated each addon with
# `(kind_clusters | items2dict(...)).get(n, {})` — but `n` is a SHELL
# variable that does not exist when Jinja renders the template, so the
# per-cluster lookups could never work. The per-cluster logic now runs in
# a Jinja {% for %} loop (the same pattern the NodePort task uses), and
# the extra trailing `}` in the metrics-server patch JSON is removed.
- name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
  community.docker.docker_container_exec:
    container: ansible-controller
    command: |
      bash -lc '
      set -e
      helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true
      helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true
      helm repo update >/dev/null 2>&1 || true
      {% for c in kind_clusters | default([]) %}
      {% set addons = c.addons | default({}) %}
      {% if addons.ingress_nginx | default(false) %}
      echo "[addons] ingress-nginx on {{ c.name }}"
      kubectl --context kind-{{ c.name }} apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true
      kubectl --context kind-{{ c.name }} -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true
      {% endif %}
      {% if addons.metrics_server | default(false) %}
      echo "[addons] metrics-server on {{ c.name }}"
      kubectl --context kind-{{ c.name }} apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true
      kubectl --context kind-{{ c.name }} -n kube-system patch deploy metrics-server -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}" || true
      {% endif %}
      {% if addons.istio | default(false) %}
      echo "[addons] istio (demo profile) on {{ c.name }}"
      istioctl install -y --set profile=demo --context kind-{{ c.name }}
      kubectl --context kind-{{ c.name }} -n istio-system rollout status deploy/istiod --timeout=180s || true
      kubectl --context kind-{{ c.name }} -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true
      {% endif %}
      {% if addons.kiali | default(false) %}
      # kiali (server chart, anonymous auth) — requires istio/metrics
      echo "[addons] kiali on {{ c.name }}"
      kubectl --context kind-{{ c.name }} create ns istio-system >/dev/null 2>&1 || true
      helm upgrade --install kiali-server kiali/kiali-server --namespace istio-system --kube-context kind-{{ c.name }} --set auth.strategy=anonymous --wait --timeout 180s
      {% endif %}
      {% if addons.prometheus_stack | default(false) %}
      echo "[addons] kube-prometheus-stack on {{ c.name }}"
      kubectl --context kind-{{ c.name }} create ns monitoring >/dev/null 2>&1 || true
      helm upgrade --install monitoring prometheus-community/kube-prometheus-stack --namespace monitoring --kube-context kind-{{ c.name }} --set grafana.adminPassword=admin --set grafana.defaultDashboardsTimezone=browser --wait --timeout 600s
      # wait for Grafana to roll out
      kubectl --context kind-{{ c.name }} -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true
      {% endif %}
      {% endfor %}
      '
  when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
# Switches addon services to NodePort on the configured host ports.
# Fixes vs. the original:
#   * The inner single quotes (--type='json', -p='[...]') terminated the
#     outer `bash -lc '...'` quoting mid-command; the patch payloads now
#     use double quotes, which are safe inside the single-quoted script.
#   * The redundant shell `for n in ...` loop (with the $$n PID bug) is
#     gone — the Jinja {% for %} already iterates per cluster, so the
#     shell-level name comparison was dead weight.
- name: Setup NodePort for addons
  community.docker.docker_container_exec:
    container: ansible-controller
    command: |
      bash -lc '
      {% for cluster in kind_clusters | default([]) %}
      {% if cluster.addon_ports is defined %}
      {% if cluster.addon_ports.prometheus is defined %}
      echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}"
      kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type=json -p="[{\"op\": \"replace\", \"path\": \"/spec/type\", \"value\": \"NodePort\"},{\"op\": \"replace\", \"path\": \"/spec/ports/0/nodePort\", \"value\": {{ cluster.addon_ports.prometheus }}}]" 2>/dev/null || true
      {% endif %}
      {% if cluster.addon_ports.grafana is defined %}
      echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}"
      kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type=json -p="[{\"op\": \"replace\", \"path\": \"/spec/type\", \"value\": \"NodePort\"},{\"op\": \"replace\", \"path\": \"/spec/ports/0/nodePort\", \"value\": {{ cluster.addon_ports.grafana }}}]" 2>/dev/null || true
      {% endif %}
      {% if cluster.addon_ports.kiali is defined %}
      echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}"
      kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type=json -p="[{\"op\": \"replace\", \"path\": \"/spec/type\", \"value\": \"NodePort\"},{\"op\": \"replace\", \"path\": \"/spec/ports/0/nodePort\", \"value\": {{ cluster.addon_ports.kiali }}}]" 2>/dev/null || true
      {% endif %}
      {% endif %}
      {% endfor %}
      '
  when: (kind_clusters | default([])) | length > 0
|
||||
Reference in New Issue
Block a user