k8s #1

.gitignore (vendored, 3 changes)
@@ -180,3 +180,6 @@ cython_debug/
# Cursor IDE
.cursor/

# Kubernetes kubeconfig
kubeconfig

Makefile (248 changes)
@@ -25,6 +25,7 @@ VERSION ?= 0.1.0
AUTHOR ?= "Сергей Антропов"
SITE ?= "https://devops.org.ru"
DOCKER_IMAGE ?= inecs/ansible-lab:ansible-controller-latest
DOCKER_K8S_IMAGE ?= inecs/ansible-lab:k8s-latest
DOCKER_DIND_IMAGE ?= docker:27-dind
CONTAINER_NAME ?= ansible-controller

@@ -40,7 +41,7 @@ DOCKER_BUILDX_BUILDER ?= multiarch-builder
# Базовые образы и их теги
BASE_IMAGES := altlinux/p9 astralinux/astra-1.7 redos/redos:9 registry.access.redhat.com/ubi8/ubi centos:7 quay.io/centos/centos:8 quay.io/centos/centos:stream9 almalinux:8 rockylinux:8 ubuntu:20.04 ubuntu:22.04 ubuntu:24.04 debian:9 debian:10 debian:11 debian:bookworm

.PHONY: role vault git docker presets controller help update-playbooks generate-docs setup-cicd list create delete
.PHONY: role vault git docker presets controller k8s help update-playbooks generate-docs setup-cicd list create delete

# =============================================================================
# КОМАНДЫ ДЛЯ РАБОТЫ С РОЛЯМИ
@@ -904,6 +905,8 @@ docker-get-base-tag:
|
||||
TAG=$$(docker inspect --format='{{.RepoTags}}' $$BASE_IMAGE 2>/dev/null | tr -d '[]' | cut -d',' -f1 | cut -d':' -f2 | tr -d ' ' || echo "latest");; \
|
||||
ansible-controller) \
|
||||
TAG="latest";; \
|
||||
k8s) \
|
||||
TAG="latest";; \
|
||||
*) \
|
||||
echo "❌ Неизвестный образ: $(IMAGE)"; \
|
||||
exit 1;; \
|
||||
@@ -1012,6 +1015,238 @@ controller:
|
||||
echo " 💡 Удаляет: контейнеры и сети";; \
|
||||
esac
|
||||
|
||||
# =============================================================================
|
||||
# КОМАНДЫ ДЛЯ РАБОТЫ С KUBERNETES KIND
|
||||
# =============================================================================
|
||||
k8s:
|
||||
@case "$(word 2, $(MAKECMDGOALS))" in \
|
||||
create) \
|
||||
echo "☸️ Создание Kind кластера..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
PRESET=k8s-minimal; \
|
||||
echo "📋 Используется preset по умолчанию: $$PRESET (минимальный без аддонов)"; \
|
||||
else \
|
||||
PRESET=$$PRESET_ARG; \
|
||||
echo "📋 Используется preset: $$PRESET"; \
|
||||
fi; \
|
||||
if [ ! -f "molecule/presets/k8s/$$PRESET.yml" ]; then \
|
||||
echo "❌ Ошибка: Пресет '$$PRESET' не найден!"; \
|
||||
echo "💡 Доступные пресеты:"; \
|
||||
ls -1 molecule/presets/k8s/*.yml 2>/dev/null | sed 's|molecule/presets/k8s/||g' | sed 's|\.yml||g' | sed 's/^/ - /' || echo " - k8s-minimal"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET; \
|
||||
docker run -d --name $$CONTAINER_NAME --rm \
|
||||
-v "$(PWD):/workspace" -w /workspace \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-u root \
|
||||
-e ANSIBLE_FORCE_COLOR=1 \
|
||||
-e MOLECULE_PRESET=$$PRESET \
|
||||
-e MOLECULE_EPHEMERAL_DIRECTORY=/tmp/molecule_workspace \
|
||||
$(DOCKER_K8S_IMAGE) \
|
||||
/bin/bash -c 'sleep infinity'; \
|
||||
echo "🚀 Запуск создания кластера..."; \
|
||||
docker exec $$CONTAINER_NAME bash -c "cd /workspace && python3 /workspace/scripts/create_k8s_cluster.py molecule/presets/k8s/$$PRESET.yml $$CONTAINER_NAME"; \
|
||||
echo "✅ Kind кластер создан"; \
|
||||
echo "💡 Для подключения используйте: make k8s kubeconfig"; \
|
||||
echo "💡 Для остановки используйте: make k8s stop";; \
|
||||
destroy) \
|
||||
echo "🗑️ Удаление Kind кластера..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
PRESET=$${PRESET_ARG:-k8s-minimal}; \
|
||||
CONTAINER_NAME=k8s-$$PRESET; \
|
||||
if docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
docker exec $$CONTAINER_NAME bash -c "kind delete clusters --all" 2>/dev/null || true; \
|
||||
else \
|
||||
echo "⚠️ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
fi; \
|
||||
docker rm -f $$CONTAINER_NAME 2>/dev/null || true; \
|
||||
echo "✅ Kind кластер удален";; \
|
||||
stop) \
|
||||
echo "🛑 Остановка Kind кластера..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s stop kubernetes"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
docker exec $$CONTAINER_NAME bash -c "kind get clusters | xargs -I {} kind stop cluster --name {}" 2>/dev/null || true; \
|
||||
echo "✅ Kind кластер остановлен"; \
|
||||
else \
|
||||
echo "⚠️ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
fi; \
|
||||
echo "💡 Кластер остановлен, но не удален"; \
|
||||
echo "💡 Для перезапуска: make k8s start $$PRESET_ARG"; \
|
||||
echo "💡 Для полного удаления: make k8s destroy $$PRESET_ARG";; \
|
||||
start) \
|
||||
echo "🚀 Запуск Kind кластера..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s start kubernetes"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if ! docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
echo "❌ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
echo "💡 Запустите: make k8s create $$PRESET_ARG"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker exec $$CONTAINER_NAME bash -c "kind get clusters | xargs -I {} kind start cluster --name {}" 2>/dev/null || true; \
|
||||
echo "✅ Kind кластер запущен";; \
|
||||
status) \
|
||||
echo "📊 Статус Kind кластеров:"; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s status kubernetes"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
docker exec $$CONTAINER_NAME bash -c "kind get clusters" 2>/dev/null || echo " Нет кластеров"; \
|
||||
docker exec $$CONTAINER_NAME bash -c "kind get clusters | while read cluster; do echo \"Кластер: \$$cluster\"; kubectl --context kind-\$$cluster get nodes 2>/dev/null || true; done" 2>/dev/null || true; \
|
||||
else \
|
||||
echo "⚠️ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
echo "💡 Запустите: make k8s create $$PRESET_ARG"; \
|
||||
fi;; \
|
||||
config) \
|
||||
echo "📋 Получение kubeconfig..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s config kubernetes"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if ! docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
echo "❌ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
echo "💡 Запустите: make k8s create $$PRESET_ARG"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
KUBECONFIG_FILE="$$(pwd)/kubeconfig"; \
|
||||
docker exec $$CONTAINER_NAME bash -c "kind get kubeconfig" > $$KUBECONFIG_FILE 2>/dev/null || true; \
|
||||
if [ -f $$KUBECONFIG_FILE ] && [ -s $$KUBECONFIG_FILE ]; then \
|
||||
echo "✅ kubeconfig сохранен в: $$KUBECONFIG_FILE"; \
|
||||
echo ""; \
|
||||
echo "💡 Для использования:"; \
|
||||
echo " export KUBECONFIG=$$KUBECONFIG_FILE"; \
|
||||
echo " kubectl get nodes"; \
|
||||
echo ""; \
|
||||
echo "💡 Или для однократного использования:"; \
|
||||
echo " kubectl --kubeconfig=$$KUBECONFIG_FILE get nodes"; \
|
||||
else \
|
||||
echo "❌ Не удалось получить kubeconfig"; \
|
||||
rm -f $$KUBECONFIG_FILE; \
|
||||
fi;; \
|
||||
addon) \
|
||||
echo "📦 Установка аддона..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
MANIFEST_ARG="$(word 4, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s addon kubernetes https://example.com/manifest.yaml"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
if [ -z "$$MANIFEST_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите URL манифеста"; \
|
||||
echo "💡 Пример: make k8s addon kubernetes https://example.com/manifest.yaml"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if ! docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
echo "❌ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
echo "💡 Запустите: make k8s create $$PRESET_ARG"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CLUSTER_NAME=$$(docker exec $$CONTAINER_NAME kind get clusters | head -1); \
|
||||
echo "📥 Установка аддона из $$MANIFEST_ARG..."; \
|
||||
docker exec $$CONTAINER_NAME bash -c "CLUSTER_NAME=$$CLUSTER_NAME; kubectl --server=https://\$${CLUSTER_NAME}-control-plane:6443 --insecure-skip-tls-verify apply -f $$MANIFEST_ARG"; \
|
||||
echo "✅ Аддон установлен";; \
|
||||
nodes) \
|
||||
echo "🖥️ Просмотр узлов кластера..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s nodes kubernetes"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if ! docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
echo "❌ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
echo "💡 Запустите: make k8s create $$PRESET_ARG"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CLUSTER_NAME=$$(docker exec $$CONTAINER_NAME kind get clusters | head -1); \
|
||||
docker exec $$CONTAINER_NAME bash -c "CLUSTER_NAME=$$CLUSTER_NAME; kubectl --server=https://\$${CLUSTER_NAME}-control-plane:6443 --insecure-skip-tls-verify get nodes";; \
|
||||
shell) \
|
||||
echo "🐚 Открытие shell в контейнере..."; \
|
||||
PRESET_ARG="$(word 3, $(MAKECMDGOALS))"; \
|
||||
if [ -z "$$PRESET_ARG" ]; then \
|
||||
echo "❌ Ошибка: Укажите пресет"; \
|
||||
echo "💡 Пример: make k8s shell kubernetes"; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
CONTAINER_NAME=k8s-$$PRESET_ARG; \
|
||||
if docker ps | grep -q $$CONTAINER_NAME; then \
|
||||
docker exec -it $$CONTAINER_NAME bash; \
|
||||
else \
|
||||
echo "❌ Контейнер $$CONTAINER_NAME не запущен"; \
|
||||
echo "💡 Запустите: make k8s create $$PRESET_ARG"; \
|
||||
fi;; \
|
||||
*) \
|
||||
echo "☸️ Доступные команды:"; \
|
||||
echo ""; \
|
||||
echo " make k8s create [preset] - создать Kind кластер"; \
|
||||
echo " 💡 Без параметра: используется k8s-minimal (без аддонов)"; \
|
||||
echo " 💡 С параметром: используется указанный пресет"; \
|
||||
echo " 💡 Кластер НЕ удаляется автоматически"; \
|
||||
echo ""; \
|
||||
echo " make k8s destroy [preset] - удалить Kind кластер полностью"; \
|
||||
echo " 💡 Удалит: кластер и контейнер ansible-controller"; \
|
||||
echo ""; \
|
||||
echo " make k8s stop [cluster] - остановить Kind кластер (без удаления)"; \
|
||||
echo " 💡 Можно указать имя кластера или остановить все"; \
|
||||
echo " 💡 Для перезапуска: make k8s start"; \
|
||||
echo ""; \
|
||||
echo " make k8s start [cluster] - запустить остановленный кластер"; \
|
||||
echo " 💡 Можно указать имя кластера или запустить все"; \
|
||||
echo ""; \
|
||||
echo " make k8s status [cluster] - показать статус кластеров"; \
|
||||
echo " 💡 Можно указать имя конкретного кластера"; \
|
||||
echo ""; \
|
||||
echo " make k8s config [cluster] - получить kubeconfig для подключения"; \
|
||||
echo " 💡 Сохраняет: kubeconfig в корне проекта"; \
|
||||
echo " 💡 Можно указать имя конкретного кластера"; \
|
||||
echo ""; \
|
||||
echo " make k8s addon [preset] [url] - установить аддон из манифеста"; \
|
||||
echo " 💡 Требует: пресет и URL манифеста"; \
|
||||
echo " 💡 Пример: make k8s addon kubernetes https://example.com/manifest.yaml"; \
|
||||
echo ""; \
|
||||
echo " make k8s nodes [preset] - показать узлы кластера"; \
|
||||
echo " 💡 Требует: пресет"; \
|
||||
echo " 💡 Пример: make k8s nodes kubernetes"; \
|
||||
echo ""; \
|
||||
echo " make k8s shell [preset] - открыть shell в контейнере"; \
|
||||
echo " 💡 Для: ручного управления kubectl/kind"; \
|
||||
echo " 💡 Пример: make k8s shell kubernetes"; \
|
||||
echo ""; \
|
||||
echo "💡 Примеры:"; \
|
||||
echo " make k8s create # создать минимальный кластер"; \
|
||||
echo " make k8s create kubernetes # создать кластер с аддонами"; \
|
||||
echo " make k8s nodes kubernetes # показать узлы кластера"; \
|
||||
echo " make k8s config kubernetes # получить kubeconfig для кластера"; \
|
||||
echo " export KUBECONFIG=kubeconfig # использовать конфиг"; \
|
||||
echo " kubectl get nodes # проверить узлы"; \
|
||||
echo " make k8s addon kubernetes https://example.com/manifest.yaml # установить аддон"; \
|
||||
echo " make k8s stop kubernetes # остановить кластер"; \
|
||||
echo " make k8s start kubernetes # запустить кластер"; \
|
||||
echo " make k8s destroy kubernetes # удалить кластер с пресетом kubernetes";; \
|
||||
esac
|
||||
|
||||
# =============================================================================
|
||||
# СПРАВКА
|
||||
# =============================================================================
|
||||
@@ -1105,6 +1340,17 @@ help:
|
||||
@echo " make controller run - запустить ansible-controller"
|
||||
@echo " make controller stop - остановить ansible-controller"
|
||||
@echo ""
|
||||
@echo "☸️ KUBERNETES (Kind кластеры):"
|
||||
@echo " make k8s create [preset] - создать Kind кластер (по умолчанию: k8s-minimal)"
|
||||
@echo " make k8s destroy [preset] - удалить Kind кластер"
|
||||
@echo " make k8s start [preset] - запустить Kind кластер"
|
||||
@echo " make k8s stop [preset] - остановить Kind кластер"
|
||||
@echo " make k8s status [preset] - показать статус кластера"
|
||||
@echo " make k8s nodes [preset] - показать узлы кластера"
|
||||
@echo " make k8s config [preset] - получить kubeconfig для подключения"
|
||||
@echo " make k8s addon [preset] [url] - установить аддон из манифеста"
|
||||
@echo " make k8s shell [preset] - открыть shell в контейнере k8s"
|
||||
@echo ""
|
||||
@echo "💡 ПРИМЕРЫ ИСПОЛЬЗОВАНИЯ:"
|
||||
@echo " make presets list # показать все preset'ы"
|
||||
@echo " make presets test PRESET=etcd-patroni # тест с etcd-patroni"
|
||||
|
||||
@@ -16,6 +16,8 @@ AnsibleLab - это универсальная система для разра
|
||||
- **Автоматическая проверка** синтаксиса Ansible ролей
|
||||
- **Управление секретами** через Ansible Vault
|
||||
- **Готовые Docker образы** для разных ОС
|
||||
- **Kubernetes Kind кластеры** для тестирования в среде Kubernetes
|
||||
- **Автоматическая установка аддонов** (Istio, Prometheus, Grafana, Kiali и другие)
|
||||
|
||||
## 📁 Структура проекта
|
||||
|
||||
@@ -501,6 +503,10 @@ make custom-images # справка по собственным
|
||||
|
||||
- **[docs/dockerfiles.md](docs/dockerfiles.md)** - Полная документация по Docker образам
|
||||
|
||||
### Kubernetes
|
||||
|
||||
- **[docs/kubernetes-kind.md](docs/kubernetes-kind.md)** - Документация по работе с Kind кластерами
|
||||
|
||||
## 🐳 Docker образы
|
||||
|
||||
Проект использует готовые Docker образы для различных ОС:
|
||||
@@ -620,6 +626,7 @@ MIT License
|
||||
- ✅ Управление секретами через Ansible Vault
|
||||
- ✅ Готовые Docker образы для разных ОС
|
||||
- ✅ CI/CD интеграция
|
||||
- ✅ Kubernetes Kind кластеры для тестирования
|
||||
|
||||
---
|
||||
|
||||
|
||||
dockerfiles/k8s/Dockerfile (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
# Kubernetes Kind Container - Multi-Arch
# Автор: Сергей Антропов
# Сайт: https://devops.org.ru

FROM ubuntu:22.04

# TARGETARCH подставляется BuildKit; объявляем ARG после FROM, иначе переменная пуста в RUN-инструкциях ниже
ARG TARGETARCH
|
||||
|
||||
# Обновляем систему
|
||||
RUN apt-get update && apt-get upgrade -y && apt-get clean
|
||||
|
||||
# Устанавливаем базовые пакеты
|
||||
RUN apt-get install -y \
|
||||
wget \
|
||||
curl \
|
||||
git \
|
||||
vim \
|
||||
bash \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-yaml \
|
||||
file \
|
||||
apt-transport-https \
|
||||
gnupg \
|
||||
lsb-release \
|
||||
&& apt-get clean
|
||||
|
||||
# Устанавливаем Docker CLI
|
||||
RUN DOCKER_VERSION=20.10.24 && \
|
||||
if [ "${TARGETARCH}" = "amd64" ]; then \
|
||||
wget -O /tmp/docker-cli.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz" && \
|
||||
tar -xz -C /tmp -f /tmp/docker-cli.tgz && \
|
||||
mv /tmp/docker/docker /usr/local/bin/ && \
|
||||
rm -rf /tmp/docker-cli.tgz /tmp/docker; \
|
||||
else \
|
||||
wget -O /tmp/docker-cli.tgz "https://download.docker.com/linux/static/stable/aarch64/docker-${DOCKER_VERSION}.tgz" && \
|
||||
tar -xz -C /tmp -f /tmp/docker-cli.tgz && \
|
||||
mv /tmp/docker/docker /usr/local/bin/ && \
|
||||
rm -rf /tmp/docker-cli.tgz /tmp/docker; \
|
||||
fi && \
|
||||
chmod +x /usr/local/bin/docker
|
||||
|
||||
# Устанавливаем kubectl
|
||||
RUN if [ "${TARGETARCH}" = "amd64" ]; then \
|
||||
wget -O kubectl "https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl"; \
|
||||
else \
|
||||
wget -O kubectl "https://dl.k8s.io/release/v1.34.1/bin/linux/arm64/kubectl"; \
|
||||
fi && \
|
||||
chmod +x kubectl && \
|
||||
mv kubectl /usr/local/bin/
|
||||
|
||||
# Устанавливаем Helm
|
||||
RUN wget https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 -O - | bash
|
||||
|
||||
# Устанавливаем Kind v0.30.0
|
||||
RUN if [ "${TARGETARCH}" = "amd64" ]; then \
|
||||
wget -O /usr/local/bin/kind "https://github.com/kubernetes-sigs/kind/releases/download/v0.30.0/kind-linux-amd64"; \
|
||||
else \
|
||||
wget -O /usr/local/bin/kind "https://github.com/kubernetes-sigs/kind/releases/download/v0.30.0/kind-linux-arm64"; \
|
||||
fi && \
|
||||
chmod +x /usr/local/bin/kind && \
|
||||
ls -lh /usr/local/bin/kind && \
|
||||
file /usr/local/bin/kind
|
||||
|
||||
# Устанавливаем Istio CLI
|
||||
RUN ARCH=$(echo ${TARGETARCH} | sed 's/amd64/x86_64/; s/arm64/aarch64/') && \
|
||||
ISTIO_VERSION=1.22.1 && \
|
||||
wget -qO- https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VERSION} TARGET_ARCH=${ARCH} sh - && \
|
||||
mv istio-${ISTIO_VERSION}/bin/istioctl /usr/local/bin/ && \
|
||||
rm -rf istio-${ISTIO_VERSION}
|
||||
|
||||
# Команда по умолчанию
|
||||
CMD ["sleep", "infinity"]
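
Примерная команда multi-arch сборки этого образа (имя образа и builder взяты из переменных Makefile; это иллюстративный набросок, точная цель сборки определяется самим Makefile):

```bash
# Сборка под amd64 и arm64; --push требует доступа к registry,
# для локальной проверки можно собрать одну платформу с --load
docker buildx build \
  --builder multiarch-builder \
  --platform linux/amd64,linux/arm64 \
  -t inecs/ansible-lab:k8s-latest \
  --push \
  dockerfiles/k8s
```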

docs/kubernetes-kind.md (new file, 297 lines)
@@ -0,0 +1,297 @@
|
||||
# Kubernetes Kind Кластеры

**Автор:** Сергей Антропов
**Сайт:** https://devops.org.ru

## Описание

Проект поддерживает автоматическое создание и управление Kubernetes кластерами на базе [Kind](https://kind.sigs.k8s.io/) для тестирования в изолированной лабораторной среде.
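
Минимальный сквозной пример работы (команды описаны в разделах ниже):

```bash
# Создать кластер с аддонами и получить kubeconfig
make k8s create kubernetes
make k8s config kubernetes

# Подключиться к кластеру с хоста
export KUBECONFIG=$(pwd)/kubeconfig
kubectl get nodes
```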

## Возможности

- Создание Kind кластеров с настраиваемым количеством worker-узлов
- Автоматическая установка аддонов:
  - Ingress NGINX Controller
  - Metrics Server
  - Istio Service Mesh
  - Kiali (визуализация Istio)
  - Prometheus Stack (Prometheus + Grafana)
- Настройка портов для внешнего доступа к аддонам
- Интеграция с Docker контейнерами в одной лабораторной сети

## Команды

### Создание кластера

```bash
# Создание минимального кластера (без аддонов)
make k8s create

# Создание кластера с полным набором аддонов
make k8s create kubernetes

# Использование пользовательского пресета
make k8s create my-custom-preset
```

### Управление кластером

```bash
# Удаление кластера
make k8s destroy [preset]

# Остановка кластера (без удаления)
make k8s stop [preset]

# Запуск остановленного кластера
make k8s start [preset]

# Проверка статуса кластера
make k8s status [preset]

# Получение kubeconfig для подключения
make k8s config [preset]

# Открытие shell в контейнере
make k8s shell [preset]
```

## Конфигурация
|
||||
|
||||
### Пресеты Kubernetes хранятся в `molecule/presets/k8s/`
|
||||
|
||||
#### Минимальный кластер (`k8s-minimal.yml`)
|
||||
|
||||
```yaml
|
||||
kind_clusters:
|
||||
- name: minimal
|
||||
workers: 0 # Только control-plane узел
|
||||
api_port: 6443
|
||||
```
|
||||
|
||||
#### Полный кластер с аддонами (`kubernetes.yml`)
|
||||
|
||||
```yaml
|
||||
kind_clusters:
|
||||
- name: lab
|
||||
workers: 2
|
||||
api_port: 6443
|
||||
addons:
|
||||
ingress_nginx: true
|
||||
metrics_server: true
|
||||
istio: true
|
||||
kiali: true
|
||||
prometheus_stack: true
|
||||
ingress_host_http_port: 8081
|
||||
ingress_host_https_port: 8443
|
||||
# Порты для внешнего доступа к аддонам
|
||||
addon_ports:
|
||||
prometheus: 9090
|
||||
grafana: 3000
|
||||
kiali: 20001
|
||||
```
|
||||
|
||||
## Доступ к аддонам
|
||||
|
||||
После создания кластера с аддонами они доступны на следующих портах:
|
||||
|
||||
### Prometheus
|
||||
```bash
|
||||
# Web UI доступна на порту 9090
|
||||
http://localhost:9090
|
||||
```
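
Быстрая проверка доступности (эндпоинты `/-/ready` и `/-/healthy` — стандартные для Prometheus; предполагается, что NodePort уже проброшен на 9090):

```bash
# Должны вернуть готовность/здоровье Prometheus
curl -s http://localhost:9090/-/ready
curl -s http://localhost:9090/-/healthy
```
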
### Grafana
|
||||
```bash
|
||||
# Web UI доступна на порту 3000
|
||||
http://localhost:3000
|
||||
|
||||
# Пароль администратора
|
||||
kubectl get secret -n monitoring monitoring-grafana \
|
||||
-o jsonpath="{.data.admin-password}" | base64 -d
|
||||
|
||||
# Логин: admin
|
||||
# Пароль: (получен выше)
|
||||
```
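
Если NodePort недоступен, порт можно пробросить напрямую (порт сервиса 80 здесь — предположение, типичное для chart'а kube-prometheus-stack; фактический порт проверьте через `kubectl get svc -n monitoring`):

```bash
# Альтернатива NodePort: Grafana на http://localhost:3000
kubectl --kubeconfig=kubeconfig -n monitoring port-forward svc/monitoring-grafana 3000:80
```
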
### Kiali
|
||||
```bash
|
||||
# Web UI доступна на порту 20001
|
||||
http://localhost:20001
|
||||
|
||||
# Аутентификация: anonymous (отключена по умолчанию)
|
||||
```
|
||||
|
||||
### Istio Ingress
|
||||
```bash
|
||||
# HTTP доступен на порту 8081
|
||||
http://localhost:8081
|
||||
|
||||
# HTTPS доступен на порту 8443
|
||||
https://localhost:8443
|
||||
```
|
||||
|
||||
## Примеры использования
|
||||
|
||||
### Создание и настройка кластера
|
||||
|
||||
```bash
|
||||
# 1. Создать кластер с аддонами
|
||||
make k8s create kubernetes
|
||||
|
||||
# 2. Проверить статус
|
||||
make k8s status kubernetes
|
||||
|
||||
# 3. Получить kubeconfig
|
||||
make k8s config kubernetes
|
||||
|
||||
# 4. Использовать kubeconfig
|
||||
export KUBECONFIG=kubeconfig
|
||||
kubectl get nodes
|
||||
kubectl get pods -A
|
||||
|
||||
# 5. Открыть Grafana в браузере
|
||||
open http://localhost:3000
|
||||
# Логин: admin
|
||||
# Пароль: (получить командой выше)
|
||||
```
|
||||
|
||||
### Управление кластером
|
||||
|
||||
```bash
# Остановить кластер (без удаления)
make k8s stop kubernetes

# Запустить остановленный кластер
make k8s start kubernetes

# Проверить статус кластера
make k8s status kubernetes

# Получить kubeconfig
make k8s config kubernetes

# Удалить кластер
make k8s destroy kubernetes
```
|
||||
|
||||
### Работа внутри контейнера
|
||||
|
||||
```bash
# Открыть shell в контейнере k8s-кластера (аргумент — имя пресета)
make k8s shell kubernetes

# Внутри контейнера:
kind get clusters
kubectl --context kind-lab get nodes
istioctl --context kind-lab proxy-status
kubectl --context kind-lab get pods -n monitoring
```
|
||||
|
||||
## Архитектура
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Docker Network: labnet │
|
||||
│ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ Ubuntu22 │ │ Debian12 │ │ Ansible │ │
|
||||
│ │ Container │ │ Container │ │ Controller │ │
|
||||
│ └──────────────┘ └──────────────┘ └──────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────────────────────┐ │
|
||||
│ │ Kind Cluster: "lab" │ │
|
||||
│ │ ┌──────────────┐ ┌──────────────┐ │ │
|
||||
│ │ │ Control Plane│ │ Worker 1 │ │ │
|
||||
│ │ │ Port 6443 │ │ │ │ │
|
||||
│ │ └──────────────┘ └──────────────┘ │ │
|
||||
│ │ ┌──────────────┐ │ │
|
||||
│ │ │ Worker 2 │ │ │
|
||||
│ │ └──────────────┘ │ │
|
||||
│ │ │ │
|
||||
│ │ NodePort Services: │ │
|
||||
│ │ - Prometheus :9090 │ │
|
||||
│ │ - Grafana :3000 │ │
|
||||
│ │ - Kiali :20001 │ │
|
||||
│ │ - Ingress :8081 (HTTP), :8443 (HTTPS) │ │
|
||||
│ └──────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
```
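
Посмотреть, какие контейнеры лаборатории подключены к сети `labnet`, можно стандартной командой Docker (имя сети берётся из пресета, переменная `docker_network`):

```bash
# Список контейнеров в сети labnet
docker network inspect labnet --format '{{range .Containers}}{{.Name}} {{end}}'
```
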
## Настройка портов аддонов
|
||||
|
||||
Вы можете настроить порты для внешнего доступа в пресете:
|
||||
|
||||
```yaml
|
||||
kind_clusters:
|
||||
- name: lab
|
||||
workers: 2
|
||||
addons:
|
||||
prometheus_stack: true
|
||||
kiali: true
|
||||
addon_ports:
|
||||
prometheus: 9090 # Prometheus UI
|
||||
grafana: 3000 # Grafana UI
|
||||
kiali: 20001 # Kiali UI
|
||||
```
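
После создания кластера соответствие портов можно проверить по полю NodePort у сервисов (имена сервисов соответствуют тем, что патчит плейбук установки аддонов):

```bash
# NodePort-сервисы аддонов
kubectl --kubeconfig=kubeconfig -n monitoring get svc monitoring-grafana monitoring-kube-prom-prometheus
kubectl --kubeconfig=kubeconfig -n istio-system get svc kiali
```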

## Best Practices

1. **Минимальные ресурсы:** Для быстрого тестирования используйте `workers: 0` (только control-plane)
2. **Production-like:** Для реалистичных тестов используйте `workers: 2-3`
3. **Аддоны:** Включайте только необходимые аддоны для уменьшения времени создания
4. **Изоляция:** Каждый preset может иметь свой уникальный кластер с разными настройками
5. **Порты:** Используйте разные порты для разных кластеров, если запускаете несколько (пример проверки занятости портов приведён ниже)
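
Перед запуском нескольких кластеров можно убедиться, что нужные порты на хосте свободны (набор портов взят из пресета `kubernetes.yml`):

```bash
# Проверка занятости портов аддонов на хосте
for p in 6443 8081 8443 9090 3000 20001; do
  if ss -ltn 2>/dev/null | grep -q ":$p "; then echo "порт $p занят"; else echo "порт $p свободен"; fi
done
```
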
## Troubleshooting
|
||||
|
||||
### Кластер не создается
|
||||
|
||||
```bash
|
||||
# Проверить логи
|
||||
docker logs ansible-controller
|
||||
|
||||
# Проверить доступное место на диске
|
||||
df -h
|
||||
|
||||
# Проверить Docker ресурсы
|
||||
docker system df
|
||||
```
|
||||
|
||||
### Проблемы с аддонами
|
||||
|
||||
```bash
|
||||
# Проверить статус подов
|
||||
kubectl get pods -A
|
||||
|
||||
# Проверить сервисы
|
||||
kubectl get svc -A
|
||||
|
||||
# Проверить порты
|
||||
kubectl get svc -n monitoring
|
||||
```
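
Если какой-то под аддона не поднимается, детали обычно видны в `describe` и логах (namespace и имя пода подставьте свои, `<имя-пода>` — заполнитель):

```bash
# Диагностика конкретного пода (пример для namespace monitoring)
kubectl -n monitoring describe pod <имя-пода>
kubectl -n monitoring logs <имя-пода> --previous
```
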
### Проблемы с Istio
|
||||
|
||||
```bash
|
||||
# Переустановить Istio
|
||||
istioctl uninstall -y --context kind-lab
|
||||
istioctl install -y --set profile=demo --context kind-lab
|
||||
```
|
||||
|
||||
### Проблемы с Prometheus Stack
|
||||
|
||||
```bash
|
||||
# Переустановить
|
||||
helm uninstall monitoring -n monitoring
|
||||
helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
|
||||
--namespace monitoring --kube-context kind-lab
|
||||
```
|
||||
|
||||
## Дополнительные ресурсы
|
||||
|
||||
- [Kind Documentation](https://kind.sigs.k8s.io/docs/)
|
||||
- [Istio Documentation](https://istio.io/latest/docs/)
|
||||
- [Kiali Documentation](https://kiali.io/documentation/)
|
||||
- [Prometheus Operator](https://prometheus-operator.dev/)
|
||||
@@ -4,7 +4,8 @@
|
||||
vars:
|
||||
# Получаем preset из переменной окружения или используем default
|
||||
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
|
||||
preset_file: "/workspace/molecule/presets/{{ preset_name }}.yml"
|
||||
# Проверяем сначала в папке k8s, затем в основной папке presets
|
||||
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"
|
||||
|
||||
# Fallback значения если preset файл не найден
|
||||
docker_network: labnet
|
||||
@@ -30,6 +31,7 @@
|
||||
- name: u1
|
||||
family: debian
|
||||
groups: [test]
|
||||
kind_clusters: []
|
||||
|
||||
tasks:
|
||||
# - name: Install required collections
|
||||
@@ -281,4 +283,135 @@
|
||||
- Groups: {{ groups_map.keys() | list | join(', ') }}
|
||||
- Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
|
||||
- DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
|
||||
- DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}
|
||||
|
||||
# ---------- Kind clusters (если определены) ----------
|
||||
- name: Create kind cluster configs
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
mkdir -p /ansible/.kind;
|
||||
cat > /ansible/.kind/{{ item.name }}.yaml <<EOF
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
{% if (item.addons|default({})).ingress_nginx|default(false) %}
|
||||
extraPortMappings:
|
||||
- containerPort: 80
|
||||
hostPort: {{ item.ingress_host_http_port | default(8081) }}
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: {{ item.ingress_host_https_port | default(8443) }}
|
||||
protocol: TCP
|
||||
{% endif %}
|
||||
{% for i in range(item.workers | default(0)) %}
|
||||
- role: worker
|
||||
{% endfor %}
|
||||
networking:
|
||||
apiServerAddress: "0.0.0.0"
|
||||
apiServerPort: {{ item.api_port | default(0) }}
|
||||
EOF
|
||||
'
|
||||
loop: "{{ kind_clusters | default([]) }}"
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Create kind clusters
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
set -e;
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
if kind get clusters | grep -qx "$$n"; then
|
||||
echo "[kind] cluster $$n already exists";
|
||||
else
|
||||
echo "[kind] creating $$n";
|
||||
kind create cluster --name "$$n" --config "/ansible/.kind/$$n.yaml";
|
||||
fi
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
set -e;
|
||||
helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
|
||||
helm repo update >/dev/null 2>&1 || true;
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
# ingress-nginx
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("ingress_nginx", False) | to_json }}; then
|
||||
echo "[addons] ingress-nginx on $$n";
|
||||
kubectl --context kind-$$n apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
|
||||
kubectl --context kind-$$n -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
|
||||
fi
|
||||
# metrics-server
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("metrics_server", False) | to_json }}; then
|
||||
echo "[addons] metrics-server on $$n";
|
||||
kubectl --context kind-$$n apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
|
||||
kubectl --context kind-$$n -n kube-system patch deploy metrics-server -p \
|
||||
"{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}}" || true;
|
||||
fi
|
||||
# istio (demo profile)
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("istio", False) | to_json }}; then
|
||||
echo "[addons] istio (demo profile) on $$n";
|
||||
istioctl install -y --set profile=demo --context kind-$$n;
|
||||
kubectl --context kind-$$n -n istio-system rollout status deploy/istiod --timeout=180s || true;
|
||||
kubectl --context kind-$$n -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
|
||||
fi
|
||||
# kiali (server chart, anonymous auth) — требует istio/metrics
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("kiali", False) | to_json }}; then
|
||||
echo "[addons] kiali on $$n";
|
||||
kubectl --context kind-$$n create ns istio-system >/dev/null 2>&1 || true;
|
||||
helm upgrade --install kiali-server kiali/kiali-server \
|
||||
--namespace istio-system --kube-context kind-$$n \
|
||||
--set auth.strategy=anonymous --wait --timeout 180s;
|
||||
fi
|
||||
# kube-prometheus-stack (Prometheus + Grafana)
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("prometheus_stack", False) | to_json }}; then
|
||||
echo "[addons] kube-prometheus-stack on $$n";
|
||||
kubectl --context kind-$$n create ns monitoring >/dev/null 2>&1 || true;
|
||||
helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
|
||||
--namespace monitoring --kube-context kind-$$n \
|
||||
--set grafana.adminPassword=admin \
|
||||
--set grafana.defaultDashboardsTimezone=browser \
|
||||
--wait --timeout 600s;
|
||||
# дождаться графаны
|
||||
kubectl --context kind-$$n -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
|
||||
fi
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Setup NodePort for addons
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
{% for cluster in kind_clusters | default([]) %}
|
||||
{% if cluster.addon_ports is defined %}
|
||||
if [ "$$n" = "{{ cluster.name }}" ]; then
|
||||
{% if cluster.addon_ports.prometheus is defined %}
|
||||
echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}";
|
||||
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.prometheus }}}]' 2>/dev/null || true;
|
||||
{% endif %}
|
||||
{% if cluster.addon_ports.grafana is defined %}
|
||||
echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}";
|
||||
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.grafana }}}]' 2>/dev/null || true;
|
||||
{% endif %}
|
||||
{% if cluster.addon_ports.kiali is defined %}
|
||||
echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}";
|
||||
kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.kiali }}}]' 2>/dev/null || true;
|
||||
{% endif %}
|
||||
fi
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
molecule/default/create.yml.bak (new file, 428 lines)
@@ -0,0 +1,428 @@
|
||||
---
|
||||
- hosts: localhost
|
||||
gather_facts: false
|
||||
vars:
|
||||
# Получаем preset из переменной окружения или используем default
|
||||
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
|
||||
# Проверяем сначала в папке k8s, затем в основной папке presets
|
||||
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"
|
||||
|
||||
# Fallback значения если preset файл не найден
|
||||
docker_network: labnet
|
||||
generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"
|
||||
images:
|
||||
alt: "inecs/ansible-lab:alt-linux-latest"
|
||||
astra: "inecs/ansible-lab:astra-linux-latest"
|
||||
rhel: "inecs/ansible-lab:rhel-latest"
|
||||
centos: "inecs/ansible-lab:centos-latest"
|
||||
alma: "inecs/ansible-lab:alma-latest"
|
||||
rocky: "inecs/ansible-lab:rocky-latest"
|
||||
redos: "inecs/ansible-lab:redos-latest"
|
||||
ubuntu: "inecs/ansible-lab:ubuntu-latest"
|
||||
debian: "inecs/ansible-lab:debian-latest"
|
||||
systemd_defaults:
|
||||
privileged: true
|
||||
command: "/sbin/init"
|
||||
volumes:
|
||||
- "/sys/fs/cgroup:/sys/fs/cgroup:rw"
|
||||
tmpfs: ["/run", "/run/lock"]
|
||||
capabilities: ["SYS_ADMIN"]
|
||||
hosts:
|
||||
- name: u1
|
||||
family: debian
|
||||
groups: [test]
|
||||
kind_clusters: []
|
||||
|
||||
tasks:
|
||||
# - name: Install required collections
|
||||
# command: ansible-galaxy collection install -r /workspace/requirements.yml
|
||||
# delegate_to: localhost
|
||||
# ignore_errors: true
|
||||
# register: collections_install
|
||||
# changed_when: false
|
||||
# run_once: true
|
||||
# become: true
|
||||
# vars:
|
||||
# ansible_python_interpreter: /usr/bin/python3
|
||||
# environment:
|
||||
# ANSIBLE_COLLECTIONS_PATH: /usr/share/ansible/collections
|
||||
|
||||
# Определяем архитектуру системы для корректной загрузки образов
|
||||
- name: Detect system architecture
|
||||
shell: |
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64) echo "linux/amd64" ;;
|
||||
aarch64|arm64) echo "linux/arm64" ;;
|
||||
armv7l) echo "linux/arm/v7" ;;
|
||||
*) echo "linux/amd64" ;;
|
||||
esac
|
||||
register: detected_platform
|
||||
changed_when: false
|
||||
|
||||
- name: Set ansible_architecture variable
|
||||
set_fact:
|
||||
ansible_architecture: "{{ detected_platform.stdout }}"
|
||||
|
||||
- name: Load preset configuration
|
||||
include_vars: "{{ preset_file }}"
|
||||
when: preset_file is file
|
||||
ignore_errors: true
|
||||
|
||||
# Фильтрация хостов по поддерживаемым платформам
|
||||
- name: Filter hosts by supported platforms
|
||||
set_fact:
|
||||
filtered_hosts: "{{ filtered_hosts | default([]) + [item] }}"
|
||||
loop: "{{ hosts }}"
|
||||
when: |
|
||||
item.supported_platforms is not defined or
|
||||
ansible_architecture in item.supported_platforms
|
||||
|
||||
- name: Update hosts list with filtered results
|
||||
set_fact:
|
||||
hosts: "{{ filtered_hosts | default(hosts) }}"
|
||||
|
||||
- name: Display filtered hosts
|
||||
debug:
|
||||
msg: "Platform {{ ansible_architecture }}: {{ hosts | length }} hosts will be deployed"
|
||||
|
||||
- name: Ensure network exists
|
||||
community.docker.docker_network:
|
||||
name: "{{ docker_network }}"
|
||||
state: present
|
||||
|
||||
# SYSTEMD nodes
|
||||
- name: Pull systemd images with correct platform
|
||||
command: "docker pull --platform {{ ansible_architecture }} {{ images[item.family] }}"
|
||||
loop: "{{ hosts | selectattr('type','undefined') | list }}"
|
||||
loop_control: { label: "{{ item.name }}" }
|
||||
when: item.family is defined and images[item.family] is defined
|
||||
register: pull_result
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Display pull results
|
||||
debug:
|
||||
msg: "Pulled {{ item.item.name }}: {{ 'OK' if (item.rc is defined and item.rc == 0) else 'SKIPPED (not available for this platform)' }}"
|
||||
loop: "{{ pull_result.results | default([]) }}"
|
||||
loop_control:
|
||||
label: "{{ item.item.name }}"
|
||||
|
||||
- name: Start systemd nodes
|
||||
community.docker.docker_container:
|
||||
name: "{{ item.name }}"
|
||||
image: "{{ images[item.family] }}"
|
||||
networks:
|
||||
- name: "{{ docker_network }}"
|
||||
privileged: "{{ systemd_defaults.privileged }}"
|
||||
command: "{{ systemd_defaults.command }}"
|
||||
volumes: "{{ systemd_defaults.volumes | default([]) + (item.volumes | default([])) }}"
|
||||
tmpfs: "{{ systemd_defaults.tmpfs | default([]) }}"
|
||||
capabilities: "{{ systemd_defaults.capabilities | default([]) }}"
|
||||
published_ports: "{{ item.publish | default([]) }}"
|
||||
env: "{{ item.env | default({}) }}"
|
||||
# Специальные настройки для Astra Linux и RedOS (для совместимости с amd64 базовыми образами)
|
||||
security_opts: "{{ ['seccomp=unconfined', 'apparmor=unconfined'] if item.family in ['astra', 'redos'] else [] }}"
|
||||
platform: "{{ 'linux/amd64' if item.family in ['astra', 'redos'] else omit }}"
|
||||
state: started
|
||||
restart_policy: unless-stopped
|
||||
loop: "{{ hosts | selectattr('type','undefined') | list }}"
|
||||
loop_control: { label: "{{ item.name }}" }
|
||||
when: item.family is defined and images[item.family] is defined
|
||||
|
||||
# Ожидание стабилизации контейнеров
|
||||
- name: Wait for containers to be ready
|
||||
pause:
|
||||
seconds: 5
|
||||
when: hosts | length > 0
|
||||
|
||||
# Создание tmp директории в контейнерах
|
||||
- name: Create Ansible tmp directory in containers
|
||||
community.docker.docker_container_exec:
|
||||
container: "{{ item.name }}"
|
||||
command: "mkdir -p /tmp/.ansible-tmp && chmod 755 /tmp/.ansible-tmp"
|
||||
loop: "{{ hosts | selectattr('type','undefined') | list }}"
|
||||
loop_control: { label: "{{ item.name }}" }
|
||||
when: item.family is defined and images[item.family] is defined
|
||||
ignore_errors: true
|
||||
retries: 3
|
||||
delay: 2
|
||||
|
||||
# DinD nodes
|
||||
- name: Start DinD nodes (docker:27-dind)
|
||||
community.docker.docker_container:
|
||||
name: "{{ item.name }}"
|
||||
image: "docker:27-dind"
|
||||
networks:
|
||||
- name: "{{ docker_network }}"
|
||||
privileged: true
|
||||
env:
|
||||
DOCKER_TLS_CERTDIR: ""
|
||||
published_ports: "{{ item.publish | default([]) }}"
|
||||
volumes: "{{ (item.volumes | default([])) + [item.name + '-docker:/var/lib/docker'] }}"
|
||||
state: started
|
||||
restart_policy: unless-stopped
|
||||
loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list }}"
|
||||
loop_control: { label: "{{ item.name }}" }
|
||||
|
||||
# DOoD nodes (mount docker.sock)
|
||||
- name: Start DOoD nodes (systemd + docker.sock mount)
|
||||
community.docker.docker_container:
|
||||
name: "{{ item.name }}"
|
||||
image: "{{ images[item.family] }}"
|
||||
networks:
|
||||
- name: "{{ docker_network }}"
|
||||
privileged: "{{ systemd_defaults.privileged }}"
|
||||
command: "{{ systemd_defaults.command }}"
|
||||
volumes: "{{ (systemd_defaults.volumes | default([])) + ['/var/run/docker.sock:/var/run/docker.sock'] + (item.volumes | default([])) }}"
|
||||
tmpfs: "{{ systemd_defaults.tmpfs | default([]) }}"
|
||||
capabilities: "{{ systemd_defaults.capabilities | default([]) }}"
|
||||
published_ports: "{{ item.publish | default([]) }}"
|
||||
env: "{{ item.env | default({}) }}"
|
||||
state: started
|
||||
restart_policy: unless-stopped
|
||||
loop: "{{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list }}"
|
||||
loop_control: { label: "{{ item.name }}" }
|
||||
when: item.family is defined and images[item.family] is defined
|
||||
|
||||
# Build groups map
|
||||
- name: Initialize groups map
|
||||
set_fact:
|
||||
groups_map: {}
|
||||
|
||||
- name: Append hosts to groups
|
||||
set_fact:
|
||||
groups_map: "{{ groups_map | combine({ item_group: (groups_map[item_group] | default([])) + [item_name] }) }}"
|
||||
loop: "{{ hosts | subelements('groups', skip_missing=True) }}"
|
||||
loop_control:
|
||||
label: "{{ item.0.name }}"
|
||||
vars:
|
||||
item_name: "{{ item.0.name }}"
|
||||
item_group: "{{ item.1 }}"
|
||||
|
||||
# Render inventory
|
||||
- name: Render inventory ini
|
||||
set_fact:
|
||||
inv_content: |
|
||||
[all:vars]
|
||||
ansible_connection=community.docker.docker
|
||||
ansible_remote_tmp=/tmp/.ansible-tmp
|
||||
|
||||
{% for group, members in (groups_map | dictsort) %}
|
||||
[{{ group }}]
|
||||
{% for h in members %}{{ h }}
|
||||
{% endfor %}
|
||||
|
||||
{% endfor %}
|
||||
[all]
|
||||
{% for h in hosts %}{{ h.name }}
|
||||
{% endfor %}
|
||||
|
||||
{# Группа с Debian-based системами (Debian, Ubuntu, Alt) - используем /usr/bin/python3 #}
|
||||
{% set debian_hosts = [] %}
|
||||
{% for h in hosts %}
|
||||
{% if h.family in ['ubuntu', 'debian', 'alt'] %}
|
||||
{% set _ = debian_hosts.append(h.name) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if debian_hosts %}
|
||||
[debian_family:vars]
|
||||
ansible_python_interpreter=/usr/bin/python3
|
||||
|
||||
[debian_family]
|
||||
{% for h in debian_hosts %}{{ h }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{# Группа с RHEL-based системами (RHEL, CentOS, Alma, Rocky, RedOS) #}
|
||||
{% set rhel_hosts = [] %}
|
||||
{% for h in hosts %}
|
||||
{% if h.family in ['rhel', 'centos', 'alma', 'rocky', 'redos'] %}
|
||||
{% set _ = rhel_hosts.append(h.name) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if rhel_hosts %}
|
||||
[rhel_family:vars]
|
||||
ansible_python_interpreter=/usr/bin/python3
|
||||
|
||||
[rhel_family]
|
||||
{% for h in rhel_hosts %}{{ h }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{# Astra Linux - используем /usr/bin/python3 #}
|
||||
{% set astra_hosts = [] %}
|
||||
{% for h in hosts %}
|
||||
{% if h.family == 'astra' %}
|
||||
{% set _ = astra_hosts.append(h.name) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% if astra_hosts %}
|
||||
[astra_family:vars]
|
||||
ansible_python_interpreter=/usr/bin/python3
|
||||
|
||||
[astra_family]
|
||||
{% for h in astra_hosts %}{{ h }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{# Глобальный fallback для остальных хостов #}
|
||||
[unmatched_hosts:vars]
|
||||
ansible_python_interpreter=auto_silent
|
||||
|
||||
- name: Ensure inventory directory exists
|
||||
file:
|
||||
path: "{{ generated_inventory | dirname }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
|
||||
- name: Write inventory file
|
||||
copy:
|
||||
dest: "{{ generated_inventory }}"
|
||||
content: "{{ inv_content }}"
|
||||
mode: "0644"
|
||||
|
||||
- name: Display inventory summary
|
||||
debug:
|
||||
msg: |
|
||||
📋 Inventory Summary:
|
||||
- Total hosts: {{ hosts | length }}
|
||||
- Groups: {{ groups_map.keys() | list | join(', ') }}
|
||||
- Systemd nodes: {{ hosts | selectattr('type','undefined') | list | length }}
|
||||
- DinD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
|
||||
- DOoD nodes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dood') | list | length }}
|
||||
|
||||
# ---------- Kind clusters (если определены) ----------
|
||||
- name: Prepare kind cluster configs
|
||||
set_fact:
|
||||
kind_config_content: |
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
{% if (item.addons|default({})).ingress_nginx|default(false) %}
|
||||
extraPortMappings:
|
||||
- containerPort: 80
|
||||
hostPort: {{ item.ingress_host_http_port | default(8081) }}
|
||||
protocol: TCP
|
||||
- containerPort: 443
|
||||
hostPort: {{ item.ingress_host_https_port | default(8443) }}
|
||||
protocol: TCP
|
||||
{% endif %}
|
||||
{% for i in range(item.workers | default(0)) %}
|
||||
- role: worker
|
||||
{% endfor %}
|
||||
networking:
|
||||
apiServerAddress: "0.0.0.0"
|
||||
apiServerPort: {{ item.api_port | default(0) }}
|
||||
loop: "{{ kind_clusters | default([]) }}"
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Create kind cluster configs
|
||||
community.docker.docker_container_exec:
|
||||
container: "{{ ansible_controller_container | default('ansible-controller') }}"
|
||||
command: >
|
||||
bash -c "
|
||||
mkdir -p /ansible/.kind;
|
||||
echo '{{ kind_config_content }}' > /ansible/.kind/{{ item.name }}.yaml
|
||||
"
|
||||
loop: "{{ kind_clusters | default([]) }}"
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Create kind clusters
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
set -e;
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
if kind get clusters | grep -qx "$$n"; then
|
||||
echo "[kind] cluster $$n already exists";
|
||||
else
|
||||
echo "[kind] creating $$n";
|
||||
kind create cluster --name "$$n" --config "/ansible/.kind/$$n.yaml";
|
||||
fi
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Install Ingress NGINX, Metrics Server, Istio, Kiali, Prometheus Stack (per cluster, if enabled)
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
set -e;
|
||||
helm repo add kiali https://kiali.org/helm-charts >/dev/null 2>&1 || true;
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true;
|
||||
helm repo update >/dev/null 2>&1 || true;
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
# ingress-nginx
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("ingress_nginx", False) | to_json }}; then
|
||||
echo "[addons] ingress-nginx on $$n";
|
||||
kubectl --context kind-$$n apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml || true;
|
||||
kubectl --context kind-$$n -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s || true;
|
||||
fi
|
||||
# metrics-server
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("metrics_server", False) | to_json }}; then
|
||||
echo "[addons] metrics-server on $$n";
|
||||
kubectl --context kind-$$n apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml || true;
|
||||
kubectl --context kind-$$n -n kube-system patch deploy metrics-server -p \
|
||||
"{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"metrics-server\",\"args\":[\"--kubelet-insecure-tls\",\"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname\"]}]}}}}}" || true;
|
||||
fi
|
||||
# istio (demo profile)
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("istio", False) | to_json }}; then
|
||||
echo "[addons] istio (demo profile) on $$n";
|
||||
istioctl install -y --set profile=demo --context kind-$$n;
|
||||
kubectl --context kind-$$n -n istio-system rollout status deploy/istiod --timeout=180s || true;
|
||||
kubectl --context kind-$$n -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s || true;
|
||||
fi
|
||||
# kiali (server chart, anonymous auth) — требует istio/metrics
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("kiali", False) | to_json }}; then
|
||||
echo "[addons] kiali on $$n";
|
||||
kubectl --context kind-$$n create ns istio-system >/dev/null 2>&1 || true;
|
||||
helm upgrade --install kiali-server kiali/kiali-server \
|
||||
--namespace istio-system --kube-context kind-$$n \
|
||||
--set auth.strategy=anonymous --wait --timeout 180s;
|
||||
fi
|
||||
# kube-prometheus-stack (Prometheus + Grafana)
|
||||
if {{ (kind_clusters | items2dict(key_name="name", value_name="addons")).get(n, {}).get("prometheus_stack", False) | to_json }}; then
|
||||
echo "[addons] kube-prometheus-stack on $$n";
|
||||
kubectl --context kind-$$n create ns monitoring >/dev/null 2>&1 || true;
|
||||
helm upgrade --install monitoring prometheus-community/kube-prometheus-stack \
|
||||
--namespace monitoring --kube-context kind-$$n \
|
||||
--set grafana.adminPassword=admin \
|
||||
--set grafana.defaultDashboardsTimezone=browser \
|
||||
--wait --timeout 600s;
|
||||
# дождаться графаны
|
||||
kubectl --context kind-$$n -n monitoring rollout status deploy/monitoring-grafana --timeout=300s || true;
|
||||
fi
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
|
||||
- name: Setup NodePort for addons
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
{% for cluster in kind_clusters | default([]) %}
|
||||
{% if cluster.addon_ports is defined %}
|
||||
if [ "$$n" = "{{ cluster.name }}" ]; then
|
||||
{% if cluster.addon_ports.prometheus is defined %}
|
||||
echo "[ports] Prometheus: {{ cluster.addon_ports.prometheus }}";
|
||||
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.prometheus }}}]' 2>/dev/null || true;
|
||||
{% endif %}
|
||||
{% if cluster.addon_ports.grafana is defined %}
|
||||
echo "[ports] Grafana: {{ cluster.addon_ports.grafana }}";
|
||||
kubectl --context kind-{{ cluster.name }} patch svc -n monitoring monitoring-grafana --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.grafana }}}]' 2>/dev/null || true;
|
||||
{% endif %}
|
||||
{% if cluster.addon_ports.kiali is defined %}
|
||||
echo "[ports] Kiali: {{ cluster.addon_ports.kiali }}";
|
||||
kubectl --context kind-{{ cluster.name }} patch svc -n istio-system kiali --type='json' -p='[{"op": "replace", "path": "/spec/type", "value":"NodePort"},{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{{ cluster.addon_ports.kiali }}}]' 2>/dev/null || true;
|
||||
{% endif %}
|
||||
fi
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
@@ -4,7 +4,8 @@
|
||||
vars:
|
||||
# Получаем preset из переменной окружения или используем default
|
||||
preset_name: "{{ lookup('env', 'MOLECULE_PRESET') | default('default') }}"
|
||||
preset_file: "/workspace/molecule/presets/{{ preset_name }}.yml"
|
||||
# Проверяем сначала в папке k8s, затем в основной папке presets
|
||||
preset_file: "{{ '/workspace/molecule/presets/k8s/' + preset_name + '.yml' if (preset_name in ['k8s-minimal', 'kubernetes', 'k8s-full'] or preset_name.startswith('k8s-')) else '/workspace/molecule/presets/' + preset_name + '.yml' }}"
|
||||
|
||||
# Fallback значения если preset файл не найден
|
||||
docker_network: labnet
|
||||
@@ -12,6 +13,7 @@
|
||||
- name: u1
|
||||
family: debian
|
||||
groups: [test]
|
||||
kind_clusters: []
|
||||
|
||||
tasks:
|
||||
- name: Load preset configuration
|
||||
@@ -74,10 +76,27 @@
|
||||
# Используем переменную hosts из загруженного пресета
|
||||
hosts: "{{ hosts }}"
|
||||
|
||||
- name: Remove kind clusters
|
||||
community.docker.docker_container_exec:
|
||||
container: ansible-controller
|
||||
command: >
|
||||
bash -lc '
|
||||
set -e;
|
||||
for n in {{ (kind_clusters | default([]) | map(attribute="name") | list) | map('quote') | join(' ') }}; do
|
||||
if kind get clusters | grep -qx "$$n"; then
|
||||
echo "[kind] deleting $$n";
|
||||
kind delete cluster --name "$$n" || true;
|
||||
fi
|
||||
done
|
||||
'
|
||||
when: (kind_clusters | default([])) | length > 0
|
||||
ignore_errors: true
|
||||
|
||||
- name: Display cleanup summary
|
||||
debug:
|
||||
msg: |
|
||||
🧹 Cleanup Summary:
|
||||
- Removed containers: {{ hosts | length }}
|
||||
- Removed DinD volumes: {{ hosts | selectattr('type','defined') | selectattr('type','equalto','dind') | list | length }}
|
||||
- Network: {{ docker_network }}
|
||||
- Removed kind clusters: {{ kind_clusters | default([]) | length }}
|
||||
@@ -33,6 +33,20 @@ systemd_defaults:
|
||||
tmpfs: ["/run", "/run/lock"]
|
||||
capabilities: ["SYS_ADMIN"]
|
||||
|
||||
# Kind кластеры (опционально)
|
||||
# kind_clusters:
|
||||
# - name: lab
|
||||
# workers: 2
|
||||
# api_port: 6443
|
||||
# addons:
|
||||
# ingress_nginx: true
|
||||
# metrics_server: true
|
||||
# istio: true
|
||||
# kiali: true
|
||||
# prometheus_stack: true
|
||||
# ingress_host_http_port: 8081
|
||||
# ingress_host_https_port: 8443
|
||||
|
||||
hosts:
|
||||
# Стандартный набор - 3 хоста
|
||||
- name: u1
|
||||
|
||||
molecule/presets/k8s/k8s-minimal.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
---
#description: Minimal Kind cluster without add-ons
# Author: Сергей Антропов
# Site: https://devops.org.ru

docker_network: labnet
generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"

# systemd-ready images
images:
  alt: "inecs/ansible-lab:alt-linux-latest"
  astra: "inecs/ansible-lab:astra-linux-latest"
  rhel: "inecs/ansible-lab:rhel-latest"
  centos7: "inecs/ansible-lab:centos7-latest"
  centos8: "inecs/ansible-lab:centos8-latest"
  centos9: "inecs/ansible-lab:centos9-latest"
  alma: "inecs/ansible-lab:alma-latest"
  rocky: "inecs/ansible-lab:rocky-latest"
  redos: "inecs/ansible-lab:redos-latest"
  ubuntu20: "inecs/ansible-lab:ubuntu20-latest"
  ubuntu22: "inecs/ansible-lab:ubuntu22-latest"
  ubuntu24: "inecs/ansible-lab:ubuntu24-latest"
  debian9: "inecs/ansible-lab:debian9-latest"
  debian10: "inecs/ansible-lab:debian10-latest"
  debian11: "inecs/ansible-lab:debian11-latest"
  debian12: "inecs/ansible-lab:debian12-latest"

systemd_defaults:
  privileged: true
  command: "/sbin/init"
  volumes:
    - "/sys/fs/cgroup:/sys/fs/cgroup:rw"
  tmpfs: ["/run", "/run/lock"]
  capabilities: ["SYS_ADMIN"]

# Minimal Kind cluster without add-ons
kind_clusters:
  - name: minimal
    workers: 0  # control-plane only
    api_port: 6443

hosts: []
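As a sketch only (inferred from scripts/create_k8s_cluster.py further below, not taken from the repository), the Kind config this preset produces at /ansible/.kind/minimal.yaml would look roughly like:

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane   # workers: 0, so no worker nodes are appended
networking:
  apiServerAddress: 0.0.0.0
  apiServerPort: 6443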
59
molecule/presets/k8s/kubernetes.yml
Normal file
@@ -0,0 +1,59 @@
---
#description: Preset for testing with a Kubernetes Kind cluster
# Author: Сергей Антропов
# Site: https://devops.org.ru

docker_network: labnet
generated_inventory: "{{ molecule_ephemeral_directory }}/inventory/hosts.ini"

# systemd-ready images
images:
  alt: "inecs/ansible-lab:alt-linux-latest"
  astra: "inecs/ansible-lab:astra-linux-latest"
  rhel: "inecs/ansible-lab:rhel-latest"
  centos7: "inecs/ansible-lab:centos7-latest"
  centos8: "inecs/ansible-lab:centos8-latest"
  centos9: "inecs/ansible-lab:centos9-latest"
  alma: "inecs/ansible-lab:alma-latest"
  rocky: "inecs/ansible-lab:rocky-latest"
  redos: "inecs/ansible-lab:redos-latest"
  ubuntu20: "inecs/ansible-lab:ubuntu20-latest"
  ubuntu22: "inecs/ansible-lab:ubuntu22-latest"
  ubuntu24: "inecs/ansible-lab:ubuntu24-latest"
  debian9: "inecs/ansible-lab:debian9-latest"
  debian10: "inecs/ansible-lab:debian10-latest"
  debian11: "inecs/ansible-lab:debian11-latest"
  debian12: "inecs/ansible-lab:debian12-latest"

systemd_defaults:
  privileged: true
  command: "/sbin/init"
  volumes:
    - "/sys/fs/cgroup:/sys/fs/cgroup:rw"
  tmpfs: ["/run", "/run/lock"]
  capabilities: ["SYS_ADMIN"]

# Kind clusters with the full set of add-ons
kind_clusters:
  - name: lab
    workers: 2
    api_port: 6443
    addons:
      ingress_nginx: true
      metrics_server: true
      istio: true
      kiali: true
      prometheus_stack: true
    ingress_host_http_port: 8081
    ingress_host_https_port: 8443
    # Ports for accessing the add-ons from outside
    # Documentation: https://devops.org.ru
    # Prometheus: http://localhost:9090
    # Grafana: http://localhost:3000 (admin/admin)
    # Kiali: http://localhost:20001
    addon_ports:
      prometheus: 9090
      grafana: 3000
      kiali: 20001

hosts: []
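For comparison with the minimal preset, here is a sketch (an illustration inferred from the script below, not repository content) of the Kind config that would be generated for the lab cluster: two workers plus host port mappings that are added because ingress_nginx is enabled:

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    extraPortMappings:        # added because addons.ingress_nginx: true
      - containerPort: 80
        hostPort: 8081        # ingress_host_http_port
        protocol: TCP
      - containerPort: 443
        hostPort: 8443        # ingress_host_https_port
        protocol: TCP
  - role: worker
  - role: worker
networking:
  apiServerAddress: 0.0.0.0
  apiServerPort: 6443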
161
scripts/create_k8s_cluster.py
Executable file
@@ -0,0 +1,161 @@
#!/usr/bin/env python3
"""
Script for creating Kind clusters
Author: Сергей Антропов
Site: https://devops.org.ru
"""
import sys
import yaml
import subprocess
import os


def run_cmd(cmd):
    """Run a shell command and abort on failure"""
    print(f"[run] {cmd}")
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        print(f"[error] {result.stderr}")
        sys.exit(1)
    print(result.stdout)
    return result.stdout


def main():
    if len(sys.argv) < 3:
        print("Usage: create_k8s_cluster.py <preset_file> <container_name>")
        sys.exit(1)

    preset_file = sys.argv[1]
    container_name = sys.argv[2]

    print(f"📋 Reading preset: {preset_file}")
    with open(preset_file, 'r') as f:
        preset = yaml.safe_load(f)

    kind_clusters = preset.get('kind_clusters', [])
    if not kind_clusters:
        print("⚠️ No kind clusters are defined in the preset")
        sys.exit(0)

    os.makedirs("/ansible/.kind", exist_ok=True)

    for cluster in kind_clusters:
        name = cluster['name']
        config_file = f"/ansible/.kind/{name}.yaml"

        print(f"\n☸️ Creating configuration for cluster: {name}")

        # Build the Kind configuration
        config = {
            'kind': 'Cluster',
            'apiVersion': 'kind.x-k8s.io/v1alpha4',
            'nodes': [
                {'role': 'control-plane'}
            ],
            'networking': {
                'apiServerAddress': '0.0.0.0',
                'apiServerPort': cluster.get('api_port', 0)
            }
        }

        # Add extraPortMappings for ingress if needed
        if cluster.get('addons', {}).get('ingress_nginx'):
            config['nodes'][0]['extraPortMappings'] = [
                {
                    'containerPort': 80,
                    'hostPort': cluster.get('ingress_host_http_port', 8081),
                    'protocol': 'TCP'
                },
                {
                    'containerPort': 443,
                    'hostPort': cluster.get('ingress_host_https_port', 8443),
                    'protocol': 'TCP'
                }
            ]

        # Add worker nodes
        workers = cluster.get('workers', 0)
        for _ in range(workers):
            config['nodes'].append({'role': 'worker'})

        # Write the configuration
        with open(config_file, 'w') as f:
            yaml.dump(config, f)

        print(f"✅ Configuration saved: {config_file}")

        # Check whether the cluster already exists
        result = subprocess.run("kind get clusters", shell=True, capture_output=True, text=True)
        existing = result.stdout.strip().split('\n') if result.returncode == 0 else []

        if name in existing:
            print(f"⚠️ Cluster '{name}' already exists, skipping")
        else:
            print(f"🚀 Creating cluster: {name}")
            run_cmd(f"kind create cluster --name {name} --config {config_file}")

        # Install add-ons
        addons = cluster.get('addons', {})
        if not addons:
            continue

        print(f"\n📦 Installing add-ons for cluster: {name}")

        if addons.get('ingress_nginx'):
            print(" - Installing ingress-nginx")
            run_cmd(f"kubectl --context kind-{name} apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml")
            run_cmd(f"kubectl --context kind-{name} -n ingress-nginx rollout status deploy/ingress-nginx-controller --timeout=180s")

        if addons.get('metrics_server'):
            print(" - Installing metrics-server")
            run_cmd(f"kubectl --context kind-{name} apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml")
            patch_json = '{"spec":{"template":{"spec":{"containers":[{"name":"metrics-server","args":["--kubelet-insecure-tls","--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]}]}}}}'
            run_cmd(f"kubectl --context kind-{name} -n kube-system patch deploy metrics-server -p '{patch_json}'")

        if addons.get('istio'):
            print(" - Installing Istio")
            run_cmd(f"istioctl install -y --set profile=demo --context kind-{name}")
            run_cmd(f"kubectl --context kind-{name} -n istio-system rollout status deploy/istiod --timeout=180s")
            run_cmd(f"kubectl --context kind-{name} -n istio-system rollout status deploy/istio-ingressgateway --timeout=180s")

        if addons.get('kiali'):
            print(" - Installing Kiali")
            # The namespace may already exist (e.g. after the Istio install above), so don't abort if creation fails.
            # Note: this assumes the kiali Helm repo is already configured in the k8s image.
            run_cmd(f"kubectl --context kind-{name} create ns istio-system 2>/dev/null || true")
            run_cmd(f"helm upgrade --install kiali-server kiali/kiali-server --namespace istio-system --kube-context kind-{name} --set auth.strategy=anonymous --wait --timeout 180s")

        if addons.get('prometheus_stack'):
            print(" - Installing Prometheus Stack")
            run_cmd("helm repo add prometheus-community https://prometheus-community.github.io/helm-charts")
            run_cmd("helm repo update")
            run_cmd(f"kubectl --context kind-{name} create ns monitoring 2>/dev/null || true")
            run_cmd(f"helm upgrade --install monitoring prometheus-community/kube-prometheus-stack --namespace monitoring --kube-context kind-{name} --set grafana.adminPassword=admin --set grafana.defaultDashboardsTimezone=browser --wait --timeout 600s")
            run_cmd(f"kubectl --context kind-{name} -n monitoring rollout status deploy/monitoring-grafana --timeout=300s")

        # Configure NodePort access for the add-ons
        addon_ports = cluster.get('addon_ports', {})
        if addon_ports:
            print("\n🔌 Configuring NodePort for add-ons")

            if 'prometheus' in addon_ports:
                port = addon_ports['prometheus']
                print(f" - Prometheus: {port}")
                patch_json = f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
                run_cmd(f"kubectl --context kind-{name} patch svc -n monitoring monitoring-kube-prom-prometheus --type='json' -p='{patch_json}'")

            if 'grafana' in addon_ports:
                port = addon_ports['grafana']
                print(f" - Grafana: {port}")
                patch_json = f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
                run_cmd(f"kubectl --context kind-{name} patch svc -n monitoring monitoring-grafana --type='json' -p='{patch_json}'")

            if 'kiali' in addon_ports:
                port = addon_ports['kiali']
                print(f" - Kiali: {port}")
                patch_json = f'[{{"op": "replace", "path": "/spec/type", "value":"NodePort"}},{{"op": "replace", "path": "/spec/ports/0/nodePort", "value":{port}}}]'
                run_cmd(f"kubectl --context kind-{name} patch svc -n istio-system kiali --type='json' -p='{patch_json}'")

        print(f"✅ Cluster '{name}' is ready!")

    print("\n🎉 All clusters created!")


if __name__ == '__main__':
    main()
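For reference, a sketch of what the Prometheus Service would look like after the JSON patch above is applied (the service name is the kube-prometheus-stack default used in the script; note that values like 9090 fall outside Kubernetes' default NodePort range of 30000-32767, so the API server may reject the patch unless that range is extended, and reaching localhost:9090 additionally depends on how the Kind node's ports are exposed):

apiVersion: v1
kind: Service
metadata:
  name: monitoring-kube-prom-prometheus
  namespace: monitoring
spec:
  type: NodePort              # replaced from ClusterIP by the patch
  ports:
    - port: 9090
      targetPort: 9090
      nodePort: 9090          # taken from addon_ports.prometheus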