feat: add support for creating and deleting containers from a preset
Some checks failed
Ansible Testing / lint (push) Has been cancelled
Ansible Testing / test (default) (push) Has been cancelled
Ansible Testing / test (minimal) (push) Has been cancelled
Ansible Testing / test (performance) (push) Has been cancelled
Ansible Testing / deploy-check (push) Has been cancelled
- Added container creation from the hosts section in create_k8s_cluster.py
- Added container removal to the make k8s destroy command
- Added the scripts/delete_hosts.py script for removing containers
- Containers are created automatically in the Docker network defined by the preset
- Containers are removed together with the Kind cluster on make k8s destroy
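
For reference, a rough usage sketch of the new flow, assuming the k8s-minimal preset (the default the destroy recipe falls back to); the second command is what the Makefile recipe below runs internally:

    # tear down the Kind cluster together with the preset's containers
    make k8s destroy k8s-minimal
    # the destroy target then removes the containers listed in the preset's hosts section
    python3 scripts/delete_hosts.py molecule/presets/k8s/k8s-minimal.yml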
Makefile
@@ -1052,17 +1052,22 @@ k8s:
 echo "💡 Для подключения используйте: make k8s kubeconfig"; \
 echo "💡 Для остановки используйте: make k8s stop";; \
 destroy) \
-echo "🗑️ Удаление Kind кластера..."; \
+echo "🗑️ Удаление Kind кластера и контейнеров..."; \
 PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
 PRESET=$${PRESET_ARG:-k8s-minimal}; \
 CONTAINER_NAME=k8s-$$PRESET; \
 if docker ps | grep -q $$CONTAINER_NAME; then \
+echo "🗑️ Удаление Kind кластеров..."; \
 docker exec $$CONTAINER_NAME bash -c "kind delete clusters --all" 2>/dev/null || true; \
 else \
 echo "⚠️ Контейнер $$CONTAINER_NAME не запущен"; \
 fi; \
 docker rm -f $$CONTAINER_NAME 2>/dev/null || true; \
-echo "✅ Kind кластер удален";; \
+echo "🗑️ Удаление контейнеров из пресета..."; \
+if [ -f "molecule/presets/k8s/$$PRESET.yml" ]; then \
+python3 scripts/delete_hosts.py molecule/presets/k8s/$$PRESET.yml 2>/dev/null || true; \
+fi; \
+echo "✅ Удаление завершено";; \
 stop) \
 echo "🛑 Остановка Kind кластера..."; \
 PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
@@ -284,6 +284,3 @@
           - Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
           - DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
           - DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}
-
-    # ---------- Kind clusters создаются через Python скрипт create_k8s_cluster.py ----------
-    # Все задачи по созданию кластеров и установке аддонов выполняются через make k8s create
@@ -1,428 +0,0 @@ (file deleted; its former contents follow)
---
- hosts: localhost
  gather_facts: false
  vars:
    # Получаем preset из переменной окружения или используем default
    preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
    # Проверяем сначала в папке k8s, затем в основной папке presets
    preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"

    # Fallback значения если preset файл не найден
    docker_network: labnet
    generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"
    images:
      alt: "inecs/ansible-lab:alt-linux-latest"
      astra: "inecs/ansible-lab:astra-linux-latest"
      rhel: "inecs/ansible-lab:rhel-latest"
      centos: "inecs/ansible-lab:centos-latest"
      alma: "inecs/ansible-lab:alma-latest"
      rocky: "inecs/ansible-lab:rocky-latest"
      redos: "inecs/ansible-lab:redos-latest"
      ubuntu: "inecs/ansible-lab:ubuntu-latest"
      debian: "inecs/ansible-lab:debian-latest"
    systemd_defaults:
      privileged: true
      command: "/sbin/init"
      volumes:
        - "/sys/fs/cgroup:/sys/fs/cgroup:rw"
      tmpfs: ["/run", "/run/lock"]
      capabilities: ["SYS_ADMIN"]
    hosts:
      - name: u1
        family: debian
        groups: [test]
    kind_clusters: []

  tasks:
    # - name: Install required collections
    #   command: ansible-galaxy collection install -r /workspace/requirements.yml
    #   delegate_to: localhost
    #   ignore_errors: true
    #   register: collections_install
    #   changed_when: false
    #   run_once: true
    #   become: true
    #   vars:
    #     ansible_python_interpreter: /usr/bin/python3
    #   environment:
    #     ANSIBLE_COLLECTIONS_PATH: /usr/share/ansible/collections

    # Определяем архитектуру системы для корректной загрузки образов
    - name: Detect system architecture
      shell: |
        arch=$(uname -m)
        case $arch in
          x86_64) echo "linux/amd64" ;;
          aarch64|arm64) echo "linux/arm64" ;;
          armv7l) echo "linux/arm/v7" ;;
          *) echo "linux/amd64" ;;
        esac
      register: detected_platform
      changed_when: false

    - name: Set ansible_architecture variable
      set_fact:
        ansible_architecture: "{{ detected_platform.stdout }}"

    - name: Load preset configuration
      include_vars: "{{ preset_file }}"
      when: preset_file is file
      ignore_errors: true

    # Фильтрация хостов по поддерживаемым платформам
    - name: Filter hosts by supported platforms
      set_fact:
        filtered_hosts: "{{ filtered_hosts | default([]) + [item] }}"
      loop: "{{ hosts }}"
      when: |
        item.supported_platforms is not defined or
        ansible_architecture in item.supported_platforms

    - name: Update hosts list with filtered results
      set_fact:
        hosts: "{{ filtered_hosts | default(hosts) }}"

    - name: Display filtered hosts
      debug:
        msg: "Platform {{ ansible_architecture }}: {{ hosts | length }} hosts will be deployed"

    - name: Ensure network exists
      community.docker.docker_network:
        name: "{{ docker_network }}"
        state: present

    # SYSTEMD nodes
    - name: Pull systemd images with correct platform
      command: "docker pull --platform {{ ansible_architecture }} {{ images[item.family] }}"
      loop: "{{ hosts | selectattr('type','undefined') | list }}"
      loop_control: { label: "{{ item.name }}" }
      when: item.family is defined and images[item.family] is defined
      register: pull_result
      ignore_errors: yes

    - name: Display pull results
      debug:
        msg: "Pulled {{ item.item.name }}: {{ 'OK' if (item.rc is defined and item.rc == 0) else 'SKIPPED (not available for this platform)' }}"
      loop: "{{ pull_result.results | default([]) }}"
      loop_control:
        label: "{{ item.item.name }}"

    - name: Start systemd nodes
      community.docker.docker_container:
        name: "{{ item.name }}"
        image: "{{ images[item.family] }}"
        networks:
          - name: "{{ docker_network }}"
        privileged: "{{ systemd_defaults.privileged }}"
        command: "{{ systemd_defaults.command }}"
        volumes: "{{ systemd_defaults.volumes | default([]) + (item.volumes | default([])) }}"
        tmpfs: "{{ systemd_defaults.tmpfs | default([]) }}"
        capabilities: "{{ systemd_defaults.capabilities | default([]) }}"
        published_ports: "{{ item.publish | default([]) }}"
        env: "{{ item.env | default({}) }}"
        # Специальные настройки для Astra Linux и RedOS (для совместимости с amd64 базовыми образами)
        security_opts: "{{ ['seccomp=unconfined', 'apparmor=unconfined'] if item.family in ['astra', 'redos'] else [] }}"
        platform: "{{ 'linux/amd64' if item.family in ['astra', 'redos'] else omit }}"
        state: started
        restart_policy: unless-stopped
      loop: "{{ hosts | selectattr('type','undefined') | list }}"
      loop_control: { label: "{{ item.name }}" }
      when: item.family is defined and images[item.family] is defined

    # Ожидание стабилизации контейнеров
    - name: Wait for containers to be ready
      pause:
        seconds: 5
      when: hosts | length > 0

    # Создание tmp директории в контейнерах
    - name: Create Ansible tmp directory in containers
      community.docker.docker_container_exec:
        container: "{{ item.name }}"
        command: "mkdir -p /tmp/.ansible-tmp && chmod 755 /tmp/.ansible-tmp"
      loop: "{{ hosts | selectattr('type','undefined') | list }}"
      loop_control: { label: "{{ item.name }}" }
      when: item.family is defined and images[item.family] is defined
      ignore_errors: true
      retries: 3
      delay: 2

    # DinD nodes
    - name: Start DinD nodes (docker:27-dind)
      community.docker.docker_container:
        name: "{{ item.name }}"
        image: "docker:27-dind"
        networks:
          - name: "{{ docker_network }}"
        privileged: true
        env:
          DOCKER_TLS_CERTDIR: ""
        published_ports: "{{ item.publish | default([]) }}"
        volumes: "{{ (item.volumes | default([])) + [item.name + '-docker:/var/lib/docker'] }}"
        state: started
        restart_policy: unless-stopped
      loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list }}"
      loop_control: { label: "{{ item.name }}" }

    # DOoD nodes (mount docker.sock)
    - name: Start DOoD nodes (systemd + docker.sock mount)
      community.docker.docker_container:
        name: "{{ item.name }}"
        image: "{{ images[item.family] }}"
        networks:
          - name: "{{ docker_network }}"
        privileged: "{{ systemd_defaults.privileged }}"
        command: "{{ systemd_defaults.command }}"
        volumes: "{{ (systemd_defaults.volumes | default([])) + ['/var/run/docker.sock:/var/run/docker.sock'] + (item.volumes | default([])) }}"
        tmpfs: "{{ systemd_defaults.tmpfs | default([]) }}"
        capabilities: "{{ systemd_defaults.capabilities | default([]) }}"
        published_ports: "{{ item.publish | default([]) }}"
        env: "{{ item.env | default({}) }}"
        state: started
        restart_policy: unless-stopped
      loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list }}"
      loop_control: { label: "{{ item.name }}" }
      when: item.family is defined and images[item.family] is defined

    # Build groups map
    - name: Initialize groups map
      set_fact:
        groups_map: {}

    - name: Append hosts to groups
      set_fact:
        groups_map: "{{ groups_map | combine({ item_group: (groups_map[item_group] | default([])) + [item_name] }) }}"
      loop: "{{ hosts | subelements('groups', skip_missing=True) }}"
      loop_control:
        label: "{{ item.0.name }}"
      vars:
        item_name: "{{ item.0.name }}"
        item_group: "{{ item.1 }}"

    # Render inventory
    - name: Render inventory ini
      set_fact:
        inv_content: |
          [all:vars]
          ansible_connection=community.docker.docker
          ansible_remote_tmp=/tmp/.ansible-tmp

          {% for group, members in (groups_map | dictsort) %}
          [{{ group }}]
          {% for h in members %}{{ h }}
          {% endfor %}

          {% endfor %}
          [all]
          {% for h in hosts %}{{ h.name }}
          {% endfor %}

          {# Группа с Debian-based системами (Debian, Ubuntu, Alt) - используем /usr/bin/python3 #}
          {% set debian_hosts = [] %}
          {% for h in hosts %}
          {% if h.family in ['ubuntu', 'debian', 'alt'] %}
          {% set _ = debian_hosts.append(h.name) %}
          {% endif %}
          {% endfor %}
          {% if debian_hosts %}
          [debian_family:vars]
          ansible_python_interpreter=/usr/bin/python3

          [debian_family]
          {% for h in debian_hosts %}{{ h }}
          {% endfor %}
          {% endif %}

          {# Группа с RHEL-based системами (RHEL, CentOS, Alma, Rocky, RedOS) #}
          {% set rhel_hosts = [] %}
          {% for h in hosts %}
          {% if h.family in ['rhel', 'centos', 'alma', 'rocky', 'redos'] %}
          {% set _ = rhel_hosts.append(h.name) %}
          {% endif %}
          {% endfor %}
          {% if rhel_hosts %}
          [rhel_family:vars]
          ansible_python_interpreter=/usr/bin/python3

          [rhel_family]
          {% for h in rhel_hosts %}{{ h }}
          {% endfor %}
          {% endif %}

          {# Astra Linux - используем /usr/bin/python3 #}
          {% set astra_hosts = [] %}
          {% for h in hosts %}
          {% if h.family == 'astra' %}
          {% set _ = astra_hosts.append(h.name) %}
          {% endif %}
          {% endfor %}
          {% if astra_hosts %}
          [astra_family:vars]
          ansible_python_interpreter=/usr/bin/python3

          [astra_family]
          {% for h in astra_hosts %}{{ h }}
          {% endfor %}
          {% endif %}

          {# Глобальный fallback для остальных хостов #}
          [unmatched_hosts:vars]
          ansible_python_interpreter=auto_silent

    - name: Ensure inventory directory exists
      file:
        path: "{{ generated_inventory | dirname }}"
        state: directory
        mode: "0755"

    - name: Write inventory file
      copy:
        dest: "{{ generated_inventory }}"
        content: "{{ inv_content }}"
        mode: "0644"

    - name: Display inventory summary
      debug:
        msg: |
          📋 Inventory Summary:
          - Total hosts: {{ hosts | length }}
          - Groups: {{ groups_map.keys() | list | join(', ') }}
          - Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
          - DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
          - DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}

    # ---------- Kind clusters (если определены) ----------
    - name: Prepare kind cluster configs
      set_fact:
        kind_config_content: |
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          nodes:
            - role: control-plane
          {% if (item.addons|default({})).ingress_nginx|default(false) %}
              extraPortMappings:
                - containerPort: 80
                  hostPort: {{ item.ingress_host_http_port | default(8081) }}
                  protocol: TCP
                - containerPort: 443
                  hostPort: {{ item.ingress_host_https_port | default(8443) }}
                  protocol: TCP
          {% endif %}
          {% for i in range(item.workers | default(0)) %}
            - role: worker
          {% endfor %}
          networking:
            apiServerAddress: "0.0.0.0"
            apiServerPort: {{ item.api_port | default(0) }}
      loop: "{{ kind_clusters | default([]) }}"
      when: (kind_clusters | default([])) | length > 0

    - name: Create kind cluster configs
      community.docker.docker_container_exec:
        container: "{{ ansible_controller_container | default('ansible-controller') }}"
        command: >
          bash -c "
          mkdir -p /ansible/.kind;
          echo '{{ kind_config_content }}' > /ansible/.kind/{{ item.name }}.yaml
          "
      loop: "{{ kind_clusters | default([]) }}"
      when: (kind_clusters | default([])) | length > 0

    - name: Create kind clusters
      community.docker.docker_container_exec:
        container: ansible-controller
        command: >
          bash -lc '
          set -e;
          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
          if kind get clusters | grep -qx "$$n"; then
          echo "[kind] cluster $$n already exists";
          else
          echo "[kind] creating $$n";
          kind create cluster --name "$$n" --config "/ansible/.kind/$$n.yaml";
          fi
          done
          '
      when: (kind_clusters | default([])) | length > 0

    - name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
      community.docker.docker_container_exec:
        container: ansible-controller
        command: >
          bash -lc '
          set -e;
          helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
          helm repo update >/dev/null 2>&1 || true;
          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
          # ingress-nginx
          if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("ingress_nginx", False) | to_json }}; then
          echo "[addons] ingress-nginx on $$n";
          kubectl --context kind-$$n apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
          kubectl --context kind-$$n -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
          fi
          # metrics-server
          if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("metrics_server", False) | to_json }}; then
          echo "[addons] metrics-server on $$n";
          kubectl --context kind-$$n apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
          kubectl --context kind-$$n -n kube-system patch deploy metrics-server -p \
          "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}}" || true;
          fi
          # istio (demo profile)
          if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("istio", False) | to_json }}; then
          echo "[addons] istio (demo profile) on $$n";
          istioctl install -y --set profile=demo --context kind-$$n;
          kubectl --context kind-$$n -n istio-system rollout status deploy/istiod --timeout=180s || true;
          kubectl --context kind-$$n -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
          fi
          # kiali (server chart, anonymous auth) — требует istio/metrics
          if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("kiali", False) | to_json }}; then
          echo "[addons] kiali on $$n";
          kubectl --context kind-$$n create ns istio-system >/dev/null 2>&1 || true;
          helm upgrade --install kiali-server kiali/kiali-server \
          --namespace istio-system --kube-context kind-$$n \
          --set auth.strategy=anonymous --wait --timeout 180s;
          fi
          # kube-prometheus-stack (Prometheus + Grafana)
          if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("prometheus_stack", False) | to_json }}; then
          echo "[addons] kube-prometheus-stack on $$n";
          kubectl --context kind-$$n create ns monitoring >/dev/null 2>&1 || true;
          helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
          --namespace monitoring --kube-context kind-$$n \
          --set grafana.adminPassword=admin \
          --set grafana.defaultDashboardsTimezone=browser \
          --wait --timeout 600s;
          # дождаться графаны
          kubectl --context kind-$$n -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
          fi
          done
          '
      when: (kind_clusters | default([])) | length > 0

    - name: Setup NodePort for addons
      community.docker.docker_container_exec:
        container: ansible-controller
        command: >
          bash -lc '
          for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
          {% for cluster in kind_clusters | default([]) %}
          {% if cluster.addon_ports is defined %}
          if [ "$$n" = "{{ cluster.name }}" ]; then
          {% if cluster.addon_ports.prometheus is defined %}
          echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}";
          kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.prometheus }}}]' 2>/dev/null || true;
          {% endif %}
          {% if cluster.addon_ports.grafana is defined %}
          echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}";
          kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.grafana }}}]' 2>/dev/null || true;
          {% endif %}
          {% if cluster.addon_ports.kiali is defined %}
          echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}";
          kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.kiali }}}]' 2>/dev/null || true;
          {% endif %}
          fi
          {% endif %}
          {% endfor %}
          done
          '
      when: (kind_clusters | default([])) | length > 0
@@ -76,9 +76,6 @@
       # Используем переменную hosts из загруженного пресета
       hosts: "{{ hosts }}"
-
-    # ---------- Kind clusters удаляются через make k8s destroy ----------
-    # Все задачи по удалению кластеров выполняются через команду make k8s destroy

     - name: Display cleanup summary
       debug:
         msg: |
@@ -56,4 +56,11 @@ kind_clusters:
       grafana: 3000
       kiali: 20001

-hosts: []
+hosts:
+  # Стандартный набор - 2 хоста для базового тестирования (стабильные ОС)
+  - name: u1
+    family: ubuntu22
+    groups: [test, web]
+  - name: u2
+    family: debian12
+    groups: [test, web]
@@ -31,9 +31,76 @@ def main():
     with open(preset_file, 'r') as f:
         preset = yaml.safe_load(f)

+    # Создаем Docker сеть если её нет
+    docker_network = preset.get('docker_network', 'labnet')
+    print(f"\n🌐 Проверка Docker сети: {docker_network}")
+    result = subprocess.run(f"docker network ls --format '{{{{.Name}}}}' | grep -x {docker_network}",
+                            shell=True, capture_output=True, text=True)
+    if not result.stdout.strip():
+        print(f"📡 Создание Docker сети: {docker_network}")
+        run_cmd(f"docker network create {docker_network}")
+    else:
+        print(f"✅ Сеть {docker_network} уже существует")
+
+    # Получаем конфигурацию для hosts
+    hosts = preset.get('hosts', [])
+    images = preset.get('images', {})
+    systemd_defaults = preset.get('systemd_defaults', {})
+
+    # Создаем контейнеры если определены hosts
+    if hosts:
+        print(f"\n🐳 Создание контейнеров (всего: {len(hosts)})")
+        for host in hosts:
+            host_name = host['name']
+            family = host['family']
+
+            # Проверяем существование контейнера
+            result = subprocess.run(f"docker ps -a --format '{{{{.Names}}}}' | grep -x {host_name}",
+                                    shell=True, capture_output=True, text=True)
+            if result.stdout.strip():
+                print(f"⚠️ Контейнер '{host_name}' уже существует, удаляем старый")
+                run_cmd(f"docker rm -f {host_name}")
+
+            # Получаем образ
+            image = images.get(family, f"inecs/ansible-lab:{family}-latest")
+
+            # Формируем команду docker run
+            cmd_parts = [
+                "docker run -d",
+                f"--name {host_name}",
+                f"--network {docker_network}",
+                "--restart=unless-stopped"
+            ]
+
+            # Добавляем systemd настройки
+            if systemd_defaults.get('privileged'):
+                cmd_parts.append("--privileged")
+
+            for vol in systemd_defaults.get('volumes', []):
+                cmd_parts.append(f"-v {vol}")
+
+            for tmpfs in systemd_defaults.get('tmpfs', []):
+                cmd_parts.append(f"--tmpfs {tmpfs}")
+
+            if systemd_defaults.get('capabilities'):
+                for cap in systemd_defaults['capabilities']:
+                    cmd_parts.append(f"--cap-add {cap}")
+
+            cmd_parts.append(image)
+
+            # Добавляем command в конец если задан
+            if systemd_defaults.get('command'):
+                cmd_parts.append(systemd_defaults['command'])
+
+            cmd = " ".join(cmd_parts)
+            print(f"🚀 Создание контейнера: {host_name}")
+            run_cmd(cmd)
+            print(f"✅ Контейнер '{host_name}' создан")
+
     kind_clusters = preset.get('kind_clusters', [])
     if not kind_clusters:
-        print("⚠️ В пресете не определены kind кластеры")
+        print("\n⚠️ В пресете не определены kind кластеры")
+        print("✅ Создание контейнеров завершено")
         sys.exit(0)

     os.makedirs("/ansible/.kind", exist_ok=True)
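
For illustration, the loop above would assemble roughly the following docker run command for a host entry such as "- name: u1 / family: ubuntu22" — assuming the preset carries the systemd_defaults shown in the deleted playbook above (privileged, /sbin/init, the cgroup mount, tmpfs, SYS_ADMIN) and no explicit image for that family, so the inecs/ansible-lab:<family>-latest fallback applies:

    docker run -d --name u1 --network labnet --restart=unless-stopped \
      --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
      --tmpfs /run --tmpfs /run/lock --cap-add SYS_ADMIN \
      inecs/ansible-lab:ubuntu22-latest /sbin/init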
scripts/delete_hosts.py (new file, 44 lines)
@@ -0,0 +1,44 @@
#!/usr/bin/env python3
"""
Скрипт для удаления контейнеров из секции hosts пресета
Автор: Сергей Антропов
Сайт: https://devops.org.ru
"""
import sys
import yaml
import subprocess

def main():
    if len(sys.argv) < 2:
        print("Usage: delete_hosts.py <preset_file>")
        sys.exit(1)

    preset_file = sys.argv[1]

    print(f"📋 Читаю пресет: {preset_file}")
    with open(preset_file, 'r') as f:
        preset = yaml.safe_load(f)

    hosts = preset.get('hosts', [])
    if not hosts:
        print("⚠️ В пресете нет контейнеров для удаления")
        sys.exit(0)

    print(f"🗑️ Удаление контейнеров (всего: {len(hosts)})")
    for host in hosts:
        host_name = host['name']

        # Проверяем существование контейнера
        result = subprocess.run(f"docker ps -a --format '{{{{.Names}}}}' | grep -x {host_name}",
                                shell=True, capture_output=True, text=True)
        if result.stdout.strip():
            print(f"🗑️ Удаление контейнера: {host_name}")
            subprocess.run(f"docker rm -f {host_name}", shell=True, capture_output=True, text=True)
            print(f"✅ Контейнер '{host_name}' удален")
        else:
            print(f"⚠️ Контейнер '{host_name}' не найден")

    print("✅ Удаление завершено")

if __name__ == "__main__":
    main()
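
The per-host check-and-remove that the script performs is roughly equivalent to this shell snippet (u1 is a sample host name from the preset):

    docker ps -a --format '{{.Names}}' | grep -x u1 && docker rm -f u1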