feat: add support for Kubernetes Kind clusters
Some checks failed
Ansible Testing / lint (push) Has been cancelled
Ansible Testing / test (default) (push) Has been cancelled
Ansible Testing / test (minimal) (push) Has been cancelled
Ansible Testing / test (performance) (push) Has been cancelled
Ansible Testing / deploy-check (push) Has been cancelled
- Created a new k8s Docker image with Kind, kubectl, Helm, and the Istio CLI
- Added make k8s commands: create, destroy, stop, start, status, config, nodes, addon, shell
- Added Kubernetes preset support under molecule/presets/k8s/
- Added the create_k8s_cluster.py script for automatic cluster creation and addon installation
- Added documentation: docs/kubernetes-kind.md
- kubectl commands run inside the k8s container and do not require a local installation
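A short usage sketch (the MOLECULE_PRESET lookup is visible in the diff below; the exact make invocation syntax is an assumption based on the target list above):

MOLECULE_PRESET=kubernetes molecule create   # bring the lab up with the Kind preset
make k8s status                              # inspect clusters via the k8s container
make k8s config                              # fetch the kubeconfig without a local kubectl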
@@ -4,7 +4,8 @@
vars:
# Get the preset from the environment variable, or use the default
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
preset_file: "/workspace/molecule/presets/{{ preset_name }}.yml"
# Check the k8s presets folder first, then the main presets folder
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"

# Fallback values in case the preset file is not found
docker_network: labnet
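For illustration, the conditional above resolves preset paths roughly like this (the preset names are examples):

MOLECULE_PRESET=k8s-full  -> /workspace/molecule/presets/k8s/k8s-full.yml
MOLECULE_PRESET=minimal   -> /workspace/molecule/presets/minimal.yml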
@@ -30,6 +31,7 @@
- name: u1
family: debian
groups: [test]
kind_clusters: []

tasks:
# - name: Install required collections
@@ -281,4 +283,135 @@
- Groups: {{ groups_map.keys() | list | join(', ') }}
- Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
- DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
- DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}

# ---------- Kind clusters (if defined) ----------
- name: Create kind cluster configs
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
mkdir -p /ansible/.kind;
cat > /ansible/.kind/{{ item.name }}.yaml <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
{% if (item.addons|default({})).ingress_nginx|default(false) %}
extraPortMappings:
- containerPort: 80
hostPort: {{ item.ingress_host_http_port | default(8081) }}
protocol: TCP
- containerPort: 443
hostPort: {{ item.ingress_host_https_port | default(8443) }}
protocol: TCP
{% endif %}
{% for i in range(item.workers | default(0)) %}
- role: worker
{% endfor %}
networking:
apiServerAddress: "0.0.0.0"
apiServerPort: {{ item.api_port | default(0) }}
EOF
'
loop: "{{ kind_clusters | default([]) }}"
when: (kind_clusters | default([])) | length > 0
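For a hypothetical kind_clusters entry named lab with workers: 2, api_port: 6443 and the ingress_nginx addon enabled, the heredoc above renders roughly the following config (indentation restored for readability):

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 80
    hostPort: 8081
    protocol: TCP
  - containerPort: 443
    hostPort: 8443
    protocol: TCP
- role: worker
- role: worker
networking:
  apiServerAddress: "0.0.0.0"
  apiServerPort: 6443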

- name: Create kind clusters
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
set -e;
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
if kind get clusters | grep -qx "$n"; then
echo "[kind] cluster $n already exists";
else
echo "[kind] creating $n";
kind create cluster --name "$n" --config "/ansible/.kind/$n.yaml";
fi
done
'
when: (kind_clusters | default([])) | length > 0

- name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
set -e;
helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
helm repo update >/dev/null 2>&1 || true;
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
# ingress-nginx
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("ingress_nginx", False) | to_json }}; then
echo "[addons] ingress-nginx on $n";
kubectl --context kind-$n apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
kubectl --context kind-$n -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
fi
# metrics-server
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("metrics_server", False) | to_json }}; then
echo "[addons] metrics-server on $n";
kubectl --context kind-$n apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
kubectl --context kind-$n -n kube-system patch deploy metrics-server -p \
"{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}" || true;
fi
# istio (demo profile)
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("istio", False) | to_json }}; then
echo "[addons] istio (demo profile) on $n";
istioctl install -y --set profile=demo --context kind-$n;
kubectl --context kind-$n -n istio-system rollout status deploy/istiod --timeout=180s || true;
kubectl --context kind-$n -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
fi
# kiali (server chart, anonymous auth) - requires istio/metrics
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("kiali", False) | to_json }}; then
echo "[addons] kiali on $n";
kubectl --context kind-$n create ns istio-system >/dev/null 2>&1 || true;
helm upgrade --install kiali-server kiali/kiali-server \
--namespace istio-system --kube-context kind-$n \
--set auth.strategy=anonymous --wait --timeout 180s;
fi
# kube-prometheus-stack (Prometheus + Grafana)
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("prometheus_stack", False) | to_json }}; then
echo "[addons] kube-prometheus-stack on $n";
kubectl --context kind-$n create ns monitoring >/dev/null 2>&1 || true;
helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
--namespace monitoring --kube-context kind-$n \
--set grafana.adminPassword=admin \
--set grafana.defaultDashboardsTimezone=browser \
--wait --timeout 600s;
# wait for Grafana
kubectl --context kind-$n -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
fi
done
'
when: (kind_clusters | default([])) | length > 0

- name: Setup NodePort for addons
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
{% for cluster in kind_clusters | default([]) %}
{% if cluster.addon_ports is defined %}
if [ "$n" = "{{ cluster.name }}" ]; then
{% if cluster.addon_ports.prometheus is defined %}
echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}";
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.prometheus }}}]' 2>/dev/null || true;
{% endif %}
{% if cluster.addon_ports.grafana is defined %}
echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}";
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.grafana }}}]' 2>/dev/null || true;
{% endif %}
{% if cluster.addon_ports.kiali is defined %}
echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}";
kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.kiali }}}]' 2>/dev/null || true;
{% endif %}
fi
{% endif %}
{% endfor %}
done
'
when: (kind_clusters | default([])) | length > 0
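The NodePort patching above is driven by an optional addon_ports map on each cluster entry; a minimal sketch of such an entry (the port values are examples, and by default Kubernetes only accepts nodePort values in the 30000-32767 range):

kind_clusters:
  - name: lab
    workers: 2
    addons:
      prometheus_stack: true
      kiali: true
    addon_ports:
      prometheus: 30090
      grafana: 30300
      kiali: 32001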
molecule/default/create.yml.bak (new file, 428 lines)
@@ -0,0 +1,428 @@
---
- hosts: localhost
gather_facts: false
vars:
# Get the preset from the environment variable, or use the default
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
# Check the k8s presets folder first, then the main presets folder
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"

# Fallback values in case the preset file is not found
docker_network: labnet
generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"
images:
alt: "inecs/ansible-lab:alt-linux-latest"
astra: "inecs/ansible-lab:astra-linux-latest"
rhel: "inecs/ansible-lab:rhel-latest"
centos: "inecs/ansible-lab:centos-latest"
alma: "inecs/ansible-lab:alma-latest"
rocky: "inecs/ansible-lab:rocky-latest"
redos: "inecs/ansible-lab:redos-latest"
ubuntu: "inecs/ansible-lab:ubuntu-latest"
debian: "inecs/ansible-lab:debian-latest"
systemd_defaults:
privileged: true
command: "/sbin/init"
volumes:
- "/sys/fs/cgroup:/sys/fs/cgroup:rw"
tmpfs: ["/run", "/run/lock"]
capabilities: ["SYS_ADMIN"]
hosts:
- name: u1
family: debian
groups: [test]
kind_clusters: []

tasks:
# - name: Install required collections
# command: ansible-galaxy collection install -r /workspace/requirements.yml
# delegate_to: localhost
# ignore_errors: true
# register: collections_install
# changed_when: false
# run_once: true
# become: true
# vars:
# ansible_python_interpreter: /usr/bin/python3
# environment:
# ANSIBLE_COLLECTIONS_PATH: /usr/share/ansible/collections

# Detect the system architecture so that the correct images are pulled
- name: Detect system architecture
shell: |
arch=$(uname -m)
case $arch in
x86_64) echo "linux/amd64" ;;
aarch64|arm64) echo "linux/arm64" ;;
armv7l) echo "linux/arm/v7" ;;
*) echo "linux/amd64" ;;
esac
register: detected_platform
changed_when: false

- name: Set ansible_architecture variable
set_fact:
ansible_architecture: "{{ detected_platform.stdout }}"

- name: Load preset configuration
include_vars: "{{ preset_file }}"
when: preset_file is file
ignore_errors: true

# Filter hosts by supported platforms
- name: Filter hosts by supported platforms
set_fact:
filtered_hosts: "{{ filtered_hosts | default([]) + [item] }}"
loop: "{{ hosts }}"
when: |
item.supported_platforms is not defined or
ansible_architecture in item.supported_platforms

- name: Update hosts list with filtered results
set_fact:
hosts: "{{ filtered_hosts | default(hosts) }}"

- name: Display filtered hosts
debug:
msg: "Platform {{ ansible_architecture }}: {{ hosts | length }} hosts will be deployed"

- name: Ensure network exists
community.docker.docker_network:
name: "{{ docker_network }}"
state: present

# SYSTEMD nodes
- name: Pull systemd images with correct platform
command: "docker pull --platform {{ ansible_architecture }} {{ images[item.family] }}"
loop: "{{ hosts | selectattr('type','undefined') | list }}"
loop_control: { label: "{{ item.name }}" }
when: item.family is defined and images[item.family] is defined
register: pull_result
ignore_errors: yes

- name: Display pull results
debug:
msg: "Pulled {{ item.item.name }}: {{ 'OK' if (item.rc is defined and item.rc == 0) else 'SKIPPED (not available for this platform)' }}"
loop: "{{ pull_result.results | default([]) }}"
loop_control:
label: "{{ item.item.name }}"

- name: Start systemd nodes
community.docker.docker_container:
name: "{{ item.name }}"
image: "{{ images[item.family] }}"
networks:
- name: "{{ docker_network }}"
privileged: "{{ systemd_defaults.privileged }}"
command: "{{ systemd_defaults.command }}"
volumes: "{{ systemd_defaults.volumes | default([]) + (item.volumes | default([])) }}"
tmpfs: "{{ systemd_defaults.tmpfs | default([]) }}"
capabilities: "{{ systemd_defaults.capabilities | default([]) }}"
published_ports: "{{ item.publish | default([]) }}"
env: "{{ item.env | default({}) }}"
# Special settings for Astra Linux and RedOS (for compatibility with amd64 base images)
security_opts: "{{ ['seccomp=unconfined', 'apparmor=unconfined'] if item.family in ['astra', 'redos'] else [] }}"
platform: "{{ 'linux/amd64' if item.family in ['astra', 'redos'] else omit }}"
state: started
restart_policy: unless-stopped
loop: "{{ hosts | selectattr('type','undefined') | list }}"
loop_control: { label: "{{ item.name }}" }
when: item.family is defined and images[item.family] is defined

# Wait for the containers to stabilize
- name: Wait for containers to be ready
pause:
seconds: 5
when: hosts | length > 0

# Create the tmp directory inside the containers
- name: Create Ansible tmp directory in containers
community.docker.docker_container_exec:
container: "{{ item.name }}"
command: "mkdir -p /tmp/.ansible-tmp && chmod 755 /tmp/.ansible-tmp"
loop: "{{ hosts | selectattr('type','undefined') | list }}"
loop_control: { label: "{{ item.name }}" }
when: item.family is defined and images[item.family] is defined
ignore_errors: true
retries: 3
delay: 2

# DinD nodes
- name: Start DinD nodes (docker:27-dind)
community.docker.docker_container:
name: "{{ item.name }}"
image: "docker:27-dind"
networks:
- name: "{{ docker_network }}"
privileged: true
env:
DOCKER_TLS_CERTDIR: ""
published_ports: "{{ item.publish | default([]) }}"
volumes: "{{ (item.volumes | default([])) + [item.name + '-docker:/var/lib/docker'] }}"
state: started
restart_policy: unless-stopped
loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list }}"
loop_control: { label: "{{ item.name }}" }

# DOoD nodes (mount docker.sock)
- name: Start DOoD nodes (systemd + docker.sock mount)
community.docker.docker_container:
name: "{{ item.name }}"
image: "{{ images[item.family] }}"
networks:
- name: "{{ docker_network }}"
privileged: "{{ systemd_defaults.privileged }}"
command: "{{ systemd_defaults.command }}"
volumes: "{{ (systemd_defaults.volumes | default([])) + ['/var/run/docker.sock:/var/run/docker.sock'] + (item.volumes | default([])) }}"
tmpfs: "{{ systemd_defaults.tmpfs | default([]) }}"
capabilities: "{{ systemd_defaults.capabilities | default([]) }}"
published_ports: "{{ item.publish | default([]) }}"
env: "{{ item.env | default({}) }}"
state: started
restart_policy: unless-stopped
loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list }}"
loop_control: { label: "{{ item.name }}" }
when: item.family is defined and images[item.family] is defined

# Build groups map
- name: Initialize groups map
set_fact:
groups_map: {}

- name: Append hosts to groups
set_fact:
groups_map: "{{ groups_map | combine({ item_group: (groups_map[item_group] | default([])) + [item_name] }) }}"
loop: "{{ hosts | subelements('groups', skip_missing=True) }}"
loop_control:
label: "{{ item.0.name }}"
vars:
item_name: "{{ item.0.name }}"
item_group: "{{ item.1 }}"
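As an illustration of the subelements loop above, a hosts list like the following (host and group names are hypothetical) would produce groups_map = {test: [u1, u2], db: [u2]}:

hosts:
  - name: u1
    groups: [test]
  - name: u2
    groups: [test, db]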

# Render inventory
- name: Render inventory ini
set_fact:
inv_content: |
[all:vars]
ansible_connection=community.docker.docker
ansible_remote_tmp=/tmp/.ansible-tmp

{% for group, members in (groups_map | dictsort) %}
[{{ group }}]
{% for h in members %}{{ h }}
{% endfor %}

{% endfor %}
[all]
{% for h in hosts %}{{ h.name }}
{% endfor %}

{# Debian-based systems group (Debian, Ubuntu, Alt) - use /usr/bin/python3 #}
{% set debian_hosts = [] %}
{% for h in hosts %}
{% if h.family in ['ubuntu', 'debian', 'alt'] %}
{% set _ = debian_hosts.append(h.name) %}
{% endif %}
{% endfor %}
{% if debian_hosts %}
[debian_family:vars]
ansible_python_interpreter=/usr/bin/python3

[debian_family]
{% for h in debian_hosts %}{{ h }}
{% endfor %}
{% endif %}

{# RHEL-based systems group (RHEL, CentOS, Alma, Rocky, RedOS) #}
{% set rhel_hosts = [] %}
{% for h in hosts %}
{% if h.family in ['rhel', 'centos', 'alma', 'rocky', 'redos'] %}
{% set _ = rhel_hosts.append(h.name) %}
{% endif %}
{% endfor %}
{% if rhel_hosts %}
[rhel_family:vars]
ansible_python_interpreter=/usr/bin/python3

[rhel_family]
{% for h in rhel_hosts %}{{ h }}
{% endfor %}
{% endif %}

{# Astra Linux - use /usr/bin/python3 #}
{% set astra_hosts = [] %}
{% for h in hosts %}
{% if h.family == 'astra' %}
{% set _ = astra_hosts.append(h.name) %}
{% endif %}
{% endfor %}
{% if astra_hosts %}
[astra_family:vars]
ansible_python_interpreter=/usr/bin/python3

[astra_family]
{% for h in astra_hosts %}{{ h }}
{% endfor %}
{% endif %}

{# Global fallback for the remaining hosts #}
[unmatched_hosts:vars]
ansible_python_interpreter=auto_silent
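With the fallback single-host configuration defined above (one debian host u1 in group test), the rendered hosts.ini would look roughly like this:

[all:vars]
ansible_connection=community.docker.docker
ansible_remote_tmp=/tmp/.ansible-tmp

[test]
u1

[all]
u1

[debian_family:vars]
ansible_python_interpreter=/usr/bin/python3

[debian_family]
u1

[unmatched_hosts:vars]
ansible_python_interpreter=auto_silent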

- name: Ensure inventory directory exists
file:
path: "{{ generated_inventory | dirname }}"
state: directory
mode: "0755"

- name: Write inventory file
copy:
dest: "{{ generated_inventory }}"
content: "{{ inv_content }}"
mode: "0644"

- name: Display inventory summary
debug:
msg: |
📋 Inventory Summary:
- Total hosts: {{ hosts | length }}
- Groups: {{ groups_map.keys() | list | join(', ') }}
- Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
- DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
- DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}

# ---------- Kind clusters (if defined) ----------
- name: Prepare kind cluster configs
set_fact:
kind_config_content: |
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
{% if (item.addons|default({})).ingress_nginx|default(false) %}
extraPortMappings:
- containerPort: 80
hostPort: {{ item.ingress_host_http_port | default(8081) }}
protocol: TCP
- containerPort: 443
hostPort: {{ item.ingress_host_https_port | default(8443) }}
protocol: TCP
{% endif %}
{% for i in range(item.workers | default(0)) %}
- role: worker
{% endfor %}
networking:
apiServerAddress: "0.0.0.0"
apiServerPort: {{ item.api_port | default(0) }}
loop: "{{ kind_clusters | default([]) }}"
when: (kind_clusters | default([])) | length > 0

- name: Create kind cluster configs
community.docker.docker_container_exec:
container: "{{ ansible_controller_container | default('ansible-controller') }}"
command: >
bash -c "
mkdir -p /ansible/.kind;
echo '{{ kind_config_content }}' > /ansible/.kind/{{ item.name }}.yaml
"
loop: "{{ kind_clusters | default([]) }}"
when: (kind_clusters | default([])) | length > 0

- name: Create kind clusters
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
set -e;
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
if kind get clusters | grep -qx "$n"; then
echo "[kind] cluster $n already exists";
else
echo "[kind] creating $n";
kind create cluster --name "$n" --config "/ansible/.kind/$n.yaml";
fi
done
'
when: (kind_clusters | default([])) | length > 0

- name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
set -e;
helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
helm repo update >/dev/null 2>&1 || true;
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
# ingress-nginx
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("ingress_nginx", False) | to_json }}; then
echo "[addons] ingress-nginx on $n";
kubectl --context kind-$n apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
kubectl --context kind-$n -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
fi
# metrics-server
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("metrics_server", False) | to_json }}; then
echo "[addons] metrics-server on $n";
kubectl --context kind-$n apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
kubectl --context kind-$n -n kube-system patch deploy metrics-server -p \
"{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}" || true;
fi
# istio (demo profile)
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("istio", False) | to_json }}; then
echo "[addons] istio (demo profile) on $n";
istioctl install -y --set profile=demo --context kind-$n;
kubectl --context kind-$n -n istio-system rollout status deploy/istiod --timeout=180s || true;
kubectl --context kind-$n -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
fi
# kiali (server chart, anonymous auth) - requires istio/metrics
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("kiali", False) | to_json }}; then
echo "[addons] kiali on $n";
kubectl --context kind-$n create ns istio-system >/dev/null 2>&1 || true;
helm upgrade --install kiali-server kiali/kiali-server \
--namespace istio-system --kube-context kind-$n \
--set auth.strategy=anonymous --wait --timeout 180s;
fi
# kube-prometheus-stack (Prometheus + Grafana)
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("prometheus_stack", False) | to_json }}; then
echo "[addons] kube-prometheus-stack on $n";
kubectl --context kind-$n create ns monitoring >/dev/null 2>&1 || true;
helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
--namespace monitoring --kube-context kind-$n \
--set grafana.adminPassword=admin \
--set grafana.defaultDashboardsTimezone=browser \
--wait --timeout 600s;
# wait for Grafana
kubectl --context kind-$n -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
fi
done
'
when: (kind_clusters | default([])) | length > 0

- name: Setup NodePort for addons
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
{% for cluster in kind_clusters | default([]) %}
{% if cluster.addon_ports is defined %}
if [ "$n" = "{{ cluster.name }}" ]; then
{% if cluster.addon_ports.prometheus is defined %}
echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}";
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.prometheus }}}]' 2>/dev/null || true;
{% endif %}
{% if cluster.addon_ports.grafana is defined %}
echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}";
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.grafana }}}]' 2>/dev/null || true;
{% endif %}
{% if cluster.addon_ports.kiali is defined %}
echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}";
kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.kiali }}}]' 2>/dev/null || true;
{% endif %}
fi
{% endif %}
{% endfor %}
done
'
when: (kind_clusters | default([])) | length > 0
@@ -4,7 +4,8 @@
vars:
# Get the preset from the environment variable, or use the default
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
preset_file: "/workspace/molecule/presets/{{ preset_name }}.yml"
# Check the k8s presets folder first, then the main presets folder
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"

# Fallback values in case the preset file is not found
docker_network: labnet
@@ -12,6 +13,7 @@
- name: u1
family: debian
groups: [test]
kind_clusters: []

tasks:
- name: Load preset configuration
@@ -74,10 +76,27 @@
# Use the hosts variable from the loaded preset
hosts: "{{ hosts }}"

- name: Remove kind clusters
community.docker.docker_container_exec:
container: ansible-controller
command: >
bash -lc '
set -e;
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
if kind get clusters | grep -qx "$n"; then
echo "[kind] deleting $n";
kind delete cluster --name "$n" || true;
fi
done
'
when: (kind_clusters | default([])) | length > 0
ignore_errors: true

- name: Display cleanup summary
debug:
msg: |
🧹 Cleanup Summary:
- Removed containers: {{ hosts | length }}
- Removed DinD volumes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
- Network: {{ docker_network }}
- Removed kind clusters: {{ kind_clusters | default([]) | length }}
@@ -33,6 +33,20 @@ systemd_defaults:
tmpfs: ["/run", "/run/lock"]
capabilities: ["SYS_ADMIN"]

# Kind clusters (optional)
# kind_clusters:
# - name: lab
# workers: 2
# api_port: 6443
# addons:
# ingress_nginx: true
# metrics_server: true
# istio: true
# kiali: true
# prometheus_stack: true
# ingress_host_http_port: 8081
# ingress_host_https_port: 8443

hosts:
# Standard set - 3 hosts
- name: u1
molecule/presets/k8s/k8s-minimal.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
---
#description: Minimal Kind cluster without addons
# Author: Sergey Antropov
# Website: https://devops.org.ru

docker_network: labnet
generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"

# systemd-ready images
images:
alt: "inecs/ansible-lab:alt-linux-latest"
astra: "inecs/ansible-lab:astra-linux-latest"
rhel: "inecs/ansible-lab:rhel-latest"
centos7: "inecs/ansible-lab:centos7-latest"
centos8: "inecs/ansible-lab:centos8-latest"
centos9: "inecs/ansible-lab:centos9-latest"
alma: "inecs/ansible-lab:alma-latest"
rocky: "inecs/ansible-lab:rocky-latest"
redos: "inecs/ansible-lab:redos-latest"
ubuntu20: "inecs/ansible-lab:ubuntu20-latest"
ubuntu22: "inecs/ansible-lab:ubuntu22-latest"
ubuntu24: "inecs/ansible-lab:ubuntu24-latest"
debian9: "inecs/ansible-lab:debian9-latest"
debian10: "inecs/ansible-lab:debian10-latest"
debian11: "inecs/ansible-lab:debian11-latest"
debian12: "inecs/ansible-lab:debian12-latest"

systemd_defaults:
privileged: true
command: "/sbin/init"
volumes:
- "/sys/fs/cgroup:/sys/fs/cgroup:rw"
tmpfs: ["/run", "/run/lock"]
capabilities: ["SYS_ADMIN"]

# Minimal Kind cluster without addons
kind_clusters:
- name: minimal
workers: 0 # control-plane only
api_port: 6443

hosts: []
molecule/presets/k8s/kubernetes.yml (new file, 59 lines)
@@ -0,0 +1,59 @@
---
#description: Preset for testing with a Kubernetes Kind cluster
# Author: Sergey Antropov
# Website: https://devops.org.ru

docker_network: labnet
generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"

# systemd-ready images
images:
alt: "inecs/ansible-lab:alt-linux-latest"
astra: "inecs/ansible-lab:astra-linux-latest"
rhel: "inecs/ansible-lab:rhel-latest"
centos7: "inecs/ansible-lab:centos7-latest"
centos8: "inecs/ansible-lab:centos8-latest"
centos9: "inecs/ansible-lab:centos9-latest"
alma: "inecs/ansible-lab:alma-latest"
rocky: "inecs/ansible-lab:rocky-latest"
redos: "inecs/ansible-lab:redos-latest"
ubuntu20: "inecs/ansible-lab:ubuntu20-latest"
ubuntu22: "inecs/ansible-lab:ubuntu22-latest"
ubuntu24: "inecs/ansible-lab:ubuntu24-latest"
debian9: "inecs/ansible-lab:debian9-latest"
debian10: "inecs/ansible-lab:debian10-latest"
debian11: "inecs/ansible-lab:debian11-latest"
debian12: "inecs/ansible-lab:debian12-latest"

systemd_defaults:
privileged: true
command: "/sbin/init"
volumes:
- "/sys/fs/cgroup:/sys/fs/cgroup:rw"
tmpfs: ["/run", "/run/lock"]
capabilities: ["SYS_ADMIN"]

# Kind clusters with the full set of addons
kind_clusters:
- name: lab
workers: 2
api_port: 6443
addons:
ingress_nginx: true
metrics_server: true
istio: true
kiali: true
prometheus_stack: true
ingress_host_http_port: 8081
ingress_host_https_port: 8443
# Ports for accessing the addons from outside
# Documentation: https://devops.org.ru
# Prometheus: http://localhost:9090
# Grafana: http://localhost:3000 (admin/admin)
# Kiali: http://localhost:20001
addon_ports:
prometheus: 9090
grafana: 3000
kiali: 20001

hosts: []