feat: добавить аддоны Harbor, Gitea, ownCloud OCIS, Nextcloud

Harbor (harbor/harbor, авто-версия):
- Values-шаблон с полной конфигурацией expose/ingress/tls/persistence
- external DB при addon_postgresql=true (Job для создания user/db)
- internal PostgreSQL + Redis в противном случае
- Метрики + ServiceMonitor при addon_prometheus_stack=true

Gitea (gitea-charts/gitea, авто-версия):
- Values-шаблон, встроенный PostgreSQL отключается при addon_postgresql=true
- Job для создания dedicated user/db в shared PostgreSQL
- Опциональный SSH NodePort (gitea_ssh_enabled)
- ServiceMonitor при addon_prometheus_stack=true

ownCloud OCIS (owncloud/ocis, авто-версия):
- Values-шаблон с insecure-режимом для HTTP
- Persistence для storageusers/storagesystem/nats/search/thumbnails
- Пароль admin через extraEnv IDM_ADMIN_PASSWORD
- ServiceMonitor при addon_prometheus_stack=true

Nextcloud (nextcloud/nextcloud, авто-версия):
- Values-шаблон, external PostgreSQL при addon_postgresql=true
- Job для создания dedicated user/db в shared PostgreSQL
- Встроенный Redis для file locking
- nextcloud-exporter (metrics sidecar) + ServiceMonitor
- Cron-задача для фоновых операций

Авто-версия: helm search repo ... --output json | from_json[0].version
Применяется при version: "" — переопределяется через ARGS="-e *_version=X.Y.Z"
This commit is contained in:
Sergey Antropoff
2026-04-25 11:49:29 +03:00
parent c24b8af395
commit e1e84aeb86
19 changed files with 1071 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
---
# Entry-point playbook for the Gitea addon.
# NOTE(review): the play name "Install ugitea" follows the same u-prefixed
# naming as the sibling addon playbooks — confirm the prefix is intentional.
- name: Install ugitea
  # Helm/kubectl only need to run once, on the first k3s master.
  hosts: k3s_master[0]
  # No host facts are used by the role; skip gathering for speed.
  gather_facts: false
  become: true
  roles:
    # The role directory lives next to this playbook.
    - role: "{{ playbook_dir }}/role"

View File

@@ -0,0 +1,46 @@
---
# Default variables for the Gitea addon role.
gitea_version: ""  # "" = automatically use the latest chart version
gitea_namespace: "gitea"
gitea_chart_repo: "https://dl.gitea.com/charts/"
# Administrator account (override the password via vault.yml)
gitea_admin_username: "gitea"
gitea_admin_password: "{{ vault_gitea_admin_password | default('changeme-gitea') }}"
gitea_admin_email: "admin@example.com"
# Ingress
gitea_ingress_enabled: true
gitea_ingress_host: "gitea.local"
gitea_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
gitea_ingress_tls: false
gitea_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"
# SSH (optional — for git clone over SSH)
gitea_ssh_enabled: false
gitea_ssh_service_type: "NodePort"
gitea_ssh_node_port: 30022
# Repository storage
gitea_storage_size: "10Gi"
gitea_storage_class: ""  # "" = cluster default StorageClass
# Database
# With addon_postgresql: true — a dedicated user/db is created in the shared PostgreSQL
# With addon_postgresql: false — the chart's bundled PostgreSQL is used
gitea_db_name: "gitea"
gitea_db_username: "gitea"
gitea_db_password: "{{ vault_gitea_db_password | default('changeme-gitea') }}"
gitea_postgresql_admin_password: "{{ vault_postgresql_postgres_password | default('changeme-postgres') }}"
# Metrics (Gitea's built-in /metrics endpoint)
gitea_metrics_enabled: true
gitea_metrics_token: ""  # optional token protecting /metrics
# The ServiceMonitor is created only when addon_prometheus_stack: true
gitea_resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 512Mi

View File

@@ -0,0 +1,121 @@
---
# Installs Gitea from the official gitea-charts Helm repository.
# With addon_postgresql: true a dedicated user/database is provisioned in the
# shared PostgreSQL first; otherwise the chart's bundled PostgreSQL is used
# (selected in the values template).
- name: Add Gitea Helm repo
  kubernetes.core.helm_repository:
    name: gitea-charts
    repo_url: "{{ gitea_chart_repo }}"
    # Refresh the cached index on every run so the "latest version" lookup
    # below does not return a stale result.
    force_update: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Fetch latest Gitea chart version
  ansible.builtin.command: helm search repo gitea-charts/gitea --output json
  register: _gitea_chart_search
  changed_when: false
  when: gitea_version == ""
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Set effective Gitea chart version
  ansible.builtin.set_fact:
    # helm search matches by substring, so filter for the exact chart name
    # instead of blindly taking the first row of the result.
    _gitea_chart_version: >-
      {{ gitea_version if gitea_version != '' else
         (_gitea_chart_search.stdout | from_json
          | selectattr('name', 'equalto', 'gitea-charts/gitea')
          | map(attribute='version') | first) }}

- name: Show Gitea chart version that will be installed
  ansible.builtin.debug:
    msg: "Устанавливаю Gitea chart {{ _gitea_chart_version }}"

# Idempotent provisioning Job: creates the role and database only when they do
# not exist yet; the Job object is garbage-collected 5 minutes after finishing.
- name: Create dedicated PostgreSQL user and database for Gitea
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: batch/v1
      kind: Job
      metadata:
        name: gitea-pg-provision
        namespace: "{{ postgresql_namespace | default('postgresql') }}"
      spec:
        ttlSecondsAfterFinished: 300
        template:
          spec:
            restartPolicy: OnFailure
            containers:
              - name: psql
                image: postgres:16-alpine
                command:
                  - /bin/sh
                  - -c
                  - |
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c "
                    DO \$\$
                    BEGIN
                      IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '${DB_USER}') THEN
                        CREATE USER ${DB_USER} WITH PASSWORD '${DB_PASS}';
                      END IF;
                    END \$\$;
                    " &&
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -tc \
                      "SELECT 1 FROM pg_database WHERE datname = '${DB_NAME}'" \
                      | grep -q 1 || \
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c \
                      "CREATE DATABASE ${DB_NAME} OWNER ${DB_USER};"
                env:
                  - name: HOST
                    value: "{{ postgresql_external_host }}"
                  - name: ADMIN_PASS
                    value: "{{ gitea_postgresql_admin_password }}"
                  - name: DB_USER
                    value: "{{ gitea_db_username }}"
                  - name: DB_PASS
                    value: "{{ gitea_db_password }}"
                  - name: DB_NAME
                    value: "{{ gitea_db_name }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  when: addon_postgresql | default(false) | bool

- name: Wait for Gitea PostgreSQL provision Job to complete
  ansible.builtin.command: >
    k3s kubectl -n {{ postgresql_namespace | default('postgresql') }}
    wait job/gitea-pg-provision
    --for=condition=complete --timeout=120s
  changed_when: false
  when: addon_postgresql | default(false) | bool

- name: Template Gitea values
  ansible.builtin.template:
    src: gitea-values.yaml.j2
    dest: /tmp/gitea-values.yaml
    mode: '0644'

- name: Install Gitea via Helm
  kubernetes.core.helm:
    name: gitea
    chart_ref: gitea-charts/gitea
    chart_version: "{{ _gitea_chart_version }}"
    release_namespace: "{{ gitea_namespace }}"
    create_namespace: true
    wait: true
    timeout: "10m0s"
    values_files:
      - /tmp/gitea-values.yaml
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Wait for Gitea to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ gitea_namespace }}
    rollout status statefulset/gitea --timeout=180s
  register: _gitea_rollout
  changed_when: false
  # "retries" is silently ignored without "until" on ansible-core < 2.16,
  # so make the retry condition explicit.
  until: _gitea_rollout.rc == 0
  retries: 3
  delay: 10

- name: Show Gitea access info
  ansible.builtin.debug:
    msg:
      - "Gitea установлен в namespace: {{ gitea_namespace }}"
      - "URL: http{{ 's' if gitea_ingress_tls else '' }}://{{ gitea_ingress_host }}"
      - "Логин: {{ gitea_admin_username }}"
      - "Пароль: {{ gitea_admin_password }}"
      - "БД: {{ 'PostgreSQL ' + postgresql_external_host if addon_postgresql | default(false) | bool else 'встроенная PostgreSQL' }}"
      - "{% if gitea_ssh_enabled %}SSH клон: git clone ssh://git@{{ gitea_ingress_host }}:{{ gitea_ssh_node_port }}/user/repo.git{% else %}SSH отключён — клонирование только по HTTP{% endif %}"
      - "Для обновления до новой версии: make addon-gitea (gitea_version='' → автопоиск)"

View File

@@ -0,0 +1,99 @@
## Gitea Helm values — Ansible managed
gitea:
  admin:
    username: "{{ gitea_admin_username }}"
    password: "{{ gitea_admin_password }}"
    email: "{{ gitea_admin_email }}"
  # Chart-level metrics switch. The serviceMonitor block must live under
  # gitea.metrics — a top-level "serviceMonitor" key is not read by the chart,
  # so the ServiceMonitor would never be created.
  metrics:
    enabled: {{ gitea_metrics_enabled | lower }}
    serviceMonitor:
      enabled: {{ (gitea_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool) | lower }}
      additionalLabels:
        release: kube-prometheus-stack
  config:
    server:
      DOMAIN: "{{ gitea_ingress_host }}"
      ROOT_URL: "http{{ 's' if gitea_ingress_tls else '' }}://{{ gitea_ingress_host }}/"
      SSH_DOMAIN: "{{ gitea_ingress_host }}"
      # NOTE(review): the chart's default listen port is 2222 (the container
      # user normally cannot bind 22) — confirm 22 actually works here.
      SSH_LISTEN_PORT: 22
      # Advertised SSH port: the NodePort when SSH is exposed.
      SSH_PORT: "{{ gitea_ssh_node_port if gitea_ssh_enabled else 22 }}"
      DISABLE_SSH: "{{ 'false' if gitea_ssh_enabled else 'true' }}"
{% if addon_postgresql | default(false) | bool %}
    database:
      DB_TYPE: postgres
      HOST: "{{ postgresql_external_host }}:{{ postgresql_external_port }}"
      NAME: "{{ gitea_db_name }}"
      USER: "{{ gitea_db_username }}"
      PASSWD: "{{ gitea_db_password }}"
      SSL_MODE: disable
{% endif %}
    metrics:
      ENABLED: "{{ gitea_metrics_enabled | lower }}"
{% if gitea_metrics_token %}
      TOKEN: "{{ gitea_metrics_token }}"
{% endif %}
    cache:
      ADAPTER: memory
persistence:
  enabled: true
  size: "{{ gitea_storage_size }}"
{% if gitea_storage_class %}
  storageClass: "{{ gitea_storage_class }}"
{% endif %}
# The chart's bundled PostgreSQL — enabled only when addon_postgresql: false
postgresql:
  enabled: {{ (not (addon_postgresql | default(false) | bool)) | lower }}
  primary:
    persistence:
      size: "2Gi"
{% if gitea_storage_class %}
      storageClass: "{{ gitea_storage_class }}"
{% endif %}
postgresql-ha:
  enabled: false
resources:
  requests:
    cpu: "{{ gitea_resources.requests.cpu }}"
    memory: "{{ gitea_resources.requests.memory }}"
  limits:
    cpu: "{{ gitea_resources.limits.cpu }}"
    memory: "{{ gitea_resources.limits.memory }}"
tolerations:
  - key: "node-role.kubernetes.io/control-plane"
    operator: "Exists"
    effect: "NoSchedule"
ingress:
  enabled: {{ gitea_ingress_enabled | lower }}
{% if gitea_ingress_enabled %}
  className: "{{ gitea_ingress_class }}"
  hosts:
    - host: "{{ gitea_ingress_host }}"
      paths:
        - path: /
          pathType: Prefix
{% if gitea_ingress_tls %}
  tls:
    - secretName: gitea-tls
      hosts:
        - "{{ gitea_ingress_host }}"
  annotations:
    cert-manager.io/cluster-issuer: "{{ gitea_ingress_cert_issuer }}"
{% endif %}
{% endif %}
service:
  ssh:
    type: "{{ gitea_ssh_service_type if gitea_ssh_enabled else 'ClusterIP' }}"
{% if gitea_ssh_enabled and gitea_ssh_service_type == 'NodePort' %}
    nodePort: {{ gitea_ssh_node_port }}
{% endif %}

View File

@@ -0,0 +1,7 @@
---
# Entry-point playbook for the Harbor addon.
# NOTE(review): the play name "Install uharbor" follows the same u-prefixed
# naming as the sibling addon playbooks — confirm the prefix is intentional.
- name: Install uharbor
  # Helm/kubectl only need to run once, on the first k3s master.
  hosts: k3s_master[0]
  # No host facts are used by the role; skip gathering for speed.
  gather_facts: false
  become: true
  roles:
    # The role directory lives next to this playbook.
    - role: "{{ playbook_dir }}/role"

View File

@@ -0,0 +1,47 @@
---
# Default variables for the Harbor addon role.
harbor_version: ""  # "" = automatically use the latest chart version
harbor_namespace: "harbor"
harbor_chart_repo: "https://helm.goharbor.io"
# Administrator password — set it in vault.yml: vault_harbor_admin_password
harbor_admin_password: "{{ vault_harbor_admin_password | default('Harbor12345') }}"
# Ingress (mandatory — Harbor requires a correct externalURL)
harbor_ingress_enabled: true
harbor_ingress_host: "harbor.local"
harbor_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
harbor_ingress_tls: false
harbor_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"
# Storage
harbor_registry_storage_size: "20Gi"
harbor_jobservice_storage_size: "1Gi"
harbor_trivy_storage_size: "5Gi"
harbor_storage_class: ""  # "" = default StorageClass
# Database
# internal — the chart's bundled PostgreSQL (1Gi PVC)
# external — addon_postgresql (chosen automatically when addon_postgresql: true)
harbor_database_type: "{{ 'external' if addon_postgresql | default(false) | bool else 'internal' }}"
harbor_database_storage_size: "1Gi"  # used only with type: internal
harbor_db_host: "{{ postgresql_external_host | default('') }}"
harbor_db_port: "{{ postgresql_external_port | default(5432) }}"
harbor_db_name: "harbor"
harbor_db_username: "harbor"
harbor_db_password: "{{ vault_harbor_db_password | default('changeme-harbor') }}"
harbor_postgresql_admin_password: "{{ vault_postgresql_postgres_password | default('changeme-postgres') }}"
# Redis — always internal (there is no separate Redis addon)
harbor_redis_storage_size: "1Gi"
# Metrics
harbor_metrics_enabled: true
# The ServiceMonitor is created only when addon_prometheus_stack: true
harbor_resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 1000m
    memory: 1Gi

View File

@@ -0,0 +1,112 @@
---
# Installs Harbor from the official goharbor Helm repository.
# With harbor_database_type == 'external' a dedicated user/database is
# provisioned in the shared PostgreSQL first.
- name: Add Harbor Helm repo
  kubernetes.core.helm_repository:
    name: harbor
    repo_url: "{{ harbor_chart_repo }}"
    # Refresh the cached index on every run so the "latest version" lookup
    # below does not return a stale result.
    force_update: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Fetch latest Harbor chart version
  ansible.builtin.command: helm search repo harbor/harbor --output json
  register: _harbor_chart_search
  changed_when: false
  when: harbor_version == ""
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Set effective Harbor chart version
  ansible.builtin.set_fact:
    # helm search matches by substring, so filter for the exact chart name
    # instead of blindly taking the first row of the result.
    _harbor_chart_version: >-
      {{ harbor_version if harbor_version != '' else
         (_harbor_chart_search.stdout | from_json
          | selectattr('name', 'equalto', 'harbor/harbor')
          | map(attribute='version') | first) }}

- name: Show Harbor chart version that will be installed
  ansible.builtin.debug:
    msg: "Устанавливаю Harbor chart {{ _harbor_chart_version }}"

# Idempotent provisioning Job: creates the role and database only when they do
# not exist yet; the Job object is garbage-collected 5 minutes after finishing.
- name: Create dedicated PostgreSQL user and database for Harbor
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: batch/v1
      kind: Job
      metadata:
        name: harbor-pg-provision
        namespace: "{{ postgresql_namespace | default('postgresql') }}"
      spec:
        ttlSecondsAfterFinished: 300
        template:
          spec:
            restartPolicy: OnFailure
            containers:
              - name: psql
                image: postgres:16-alpine
                command:
                  - /bin/sh
                  - -c
                  - |
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c "
                    DO \$\$
                    BEGIN
                      IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '${DB_USER}') THEN
                        CREATE USER ${DB_USER} WITH PASSWORD '${DB_PASS}';
                      END IF;
                    END \$\$;
                    " &&
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -tc \
                      "SELECT 1 FROM pg_database WHERE datname = '${DB_NAME}'" \
                      | grep -q 1 || \
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c \
                      "CREATE DATABASE ${DB_NAME} OWNER ${DB_USER};"
                env:
                  - name: HOST
                    value: "{{ harbor_db_host }}"
                  - name: ADMIN_PASS
                    value: "{{ harbor_postgresql_admin_password }}"
                  - name: DB_USER
                    value: "{{ harbor_db_username }}"
                  - name: DB_PASS
                    value: "{{ harbor_db_password }}"
                  - name: DB_NAME
                    value: "{{ harbor_db_name }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  when: harbor_database_type == 'external'

- name: Wait for Harbor PostgreSQL provision Job to complete
  ansible.builtin.command: >
    k3s kubectl -n {{ postgresql_namespace | default('postgresql') }}
    wait job/harbor-pg-provision
    --for=condition=complete --timeout=120s
  changed_when: false
  when: harbor_database_type == 'external'

- name: Template Harbor values
  ansible.builtin.template:
    src: harbor-values.yaml.j2
    dest: /tmp/harbor-values.yaml
    mode: '0644'

# "wait: true" makes Helm block until all Harbor workloads are ready.
- name: Install Harbor via Helm
  kubernetes.core.helm:
    name: harbor
    chart_ref: harbor/harbor
    chart_version: "{{ _harbor_chart_version }}"
    release_namespace: "{{ harbor_namespace }}"
    create_namespace: true
    wait: true
    timeout: "15m0s"
    values_files:
      - /tmp/harbor-values.yaml
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Show Harbor access info
  ansible.builtin.debug:
    msg:
      - "Harbor установлен в namespace: {{ harbor_namespace }}"
      - "URL: http{{ 's' if harbor_ingress_tls else '' }}://{{ harbor_ingress_host }}"
      - "Логин: admin"
      - "Пароль: {{ harbor_admin_password }}"
      - "Docker login: docker login {{ harbor_ingress_host }} -u admin"
      - "БД: {{ harbor_database_type }}{{ ' (PostgreSQL ' + harbor_db_host + ')' if harbor_database_type == 'external' else '' }}"

View File

@@ -0,0 +1,88 @@
## Harbor Helm values — Ansible managed
expose:
  type: ingress
  ingress:
    hosts:
      core: "{{ harbor_ingress_host }}"
    className: "{{ harbor_ingress_class }}"
    annotations:
      # Required in both TLS and plain-HTTP modes: allow pushing arbitrarily
      # large image layers and long blob uploads through the ingress.
      # (Previously these were only set in the non-TLS branch, breaking
      # large pushes as soon as TLS was enabled.)
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
{% if harbor_ingress_tls %}
      cert-manager.io/cluster-issuer: "{{ harbor_ingress_cert_issuer }}"
  tls:
    enabled: true
    certSource: secret
    secret:
      secretName: harbor-tls
{% else %}
  tls:
    enabled: false
{% endif %}
# Harbor requires the externally visible URL to match the ingress host.
externalURL: "http{{ 's' if harbor_ingress_tls else '' }}://{{ harbor_ingress_host }}"
harborAdminPassword: "{{ harbor_admin_password }}"
persistence:
  enabled: true
  persistentVolumeClaim:
    registry:
      size: "{{ harbor_registry_storage_size }}"
{% if harbor_storage_class %}
      storageClass: "{{ harbor_storage_class }}"
{% endif %}
    jobservice:
      jobLog:
        size: "{{ harbor_jobservice_storage_size }}"
{% if harbor_storage_class %}
        storageClass: "{{ harbor_storage_class }}"
{% endif %}
    # database/redis PVCs are only used with the internal database/redis.
    database:
      size: "{{ harbor_database_storage_size }}"
{% if harbor_storage_class %}
      storageClass: "{{ harbor_storage_class }}"
{% endif %}
    redis:
      size: "{{ harbor_redis_storage_size }}"
{% if harbor_storage_class %}
      storageClass: "{{ harbor_storage_class }}"
{% endif %}
    trivy:
      size: "{{ harbor_trivy_storage_size }}"
{% if harbor_storage_class %}
      storageClass: "{{ harbor_storage_class }}"
{% endif %}
database:
  type: "{{ harbor_database_type }}"
  # The external block is ignored by the chart when type is "internal".
  external:
    host: "{{ harbor_db_host }}"
    port: "{{ harbor_db_port | string }}"
    username: "{{ harbor_db_username }}"
    password: "{{ harbor_db_password }}"
    coreDatabase: "{{ harbor_db_name }}"
redis:
  type: internal
metrics:
  enabled: {{ harbor_metrics_enabled | lower }}
  serviceMonitor:
    enabled: {{ (harbor_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool) | lower }}
    additionalLabels:
      release: kube-prometheus-stack
core:
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"
registry:
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
      effect: "NoSchedule"

View File

@@ -0,0 +1,7 @@
---
# Entry-point playbook for the Nextcloud addon.
# NOTE(review): the play name "Install unextcloud" follows the same u-prefixed
# naming as the sibling addon playbooks — confirm the prefix is intentional.
- name: Install unextcloud
  # Helm/kubectl only need to run once, on the first k3s master.
  hosts: k3s_master[0]
  # No host facts are used by the role; skip gathering for speed.
  gather_facts: false
  become: true
  roles:
    # The role directory lives next to this playbook.
    - role: "{{ playbook_dir }}/role"

View File

@@ -0,0 +1,45 @@
---
# Default variables for the Nextcloud addon role.
nextcloud_version: ""  # "" = automatically use the latest chart version
nextcloud_namespace: "nextcloud"
nextcloud_chart_repo: "https://nextcloud.github.io/helm/"
# Administrator account (override the password via vault.yml)
nextcloud_admin_username: "admin"
nextcloud_admin_password: "{{ vault_nextcloud_admin_password | default('changeme-nextcloud') }}"
# Ingress
nextcloud_ingress_enabled: true
nextcloud_ingress_host: "nextcloud.local"
nextcloud_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
nextcloud_ingress_tls: false
nextcloud_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"
# File storage
nextcloud_storage_size: "20Gi"
nextcloud_storage_class: ""  # "" = cluster default StorageClass
# Database
# With addon_postgresql: true — a dedicated user/db is created in the shared PostgreSQL
# With addon_postgresql: false — the chart's built-in SQLite (not recommended for prod)
nextcloud_db_name: "nextcloud"
nextcloud_db_username: "nextcloud"
nextcloud_db_password: "{{ vault_nextcloud_db_password | default('changeme-nextcloud') }}"
nextcloud_postgresql_admin_password: "{{ vault_postgresql_postgres_password | default('changeme-postgres') }}"
# Redis — bundled (for file locking and caching)
nextcloud_redis_enabled: true
# Background jobs for Nextcloud (cron task inside the pod)
nextcloud_cronjob_enabled: true
# Metrics (nextcloud-exporter — a separate sidecar)
nextcloud_metrics_enabled: true
# The ServiceMonitor is created only when addon_prometheus_stack: true
nextcloud_resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 1000m
    memory: 1Gi

View File

@@ -0,0 +1,120 @@
---
# Installs Nextcloud from the official Helm repository.
# With addon_postgresql: true a dedicated user/database is provisioned in the
# shared PostgreSQL first; otherwise the chart falls back to SQLite
# (selected in the values template).
- name: Add Nextcloud Helm repo
  kubernetes.core.helm_repository:
    name: nextcloud
    repo_url: "{{ nextcloud_chart_repo }}"
    # Refresh the cached index on every run so the "latest version" lookup
    # below does not return a stale result.
    force_update: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Fetch latest Nextcloud chart version
  ansible.builtin.command: helm search repo nextcloud/nextcloud --output json
  register: _nextcloud_chart_search
  changed_when: false
  when: nextcloud_version == ""
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Set effective Nextcloud chart version
  ansible.builtin.set_fact:
    # helm search matches by substring, so filter for the exact chart name
    # instead of blindly taking the first row of the result.
    _nextcloud_chart_version: >-
      {{ nextcloud_version if nextcloud_version != '' else
         (_nextcloud_chart_search.stdout | from_json
          | selectattr('name', 'equalto', 'nextcloud/nextcloud')
          | map(attribute='version') | first) }}

- name: Show Nextcloud chart version that will be installed
  ansible.builtin.debug:
    msg: "Устанавливаю Nextcloud chart {{ _nextcloud_chart_version }}"

# Idempotent provisioning Job: creates the role and database only when they do
# not exist yet; the Job object is garbage-collected 5 minutes after finishing.
- name: Create dedicated PostgreSQL user and database for Nextcloud
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: batch/v1
      kind: Job
      metadata:
        name: nextcloud-pg-provision
        namespace: "{{ postgresql_namespace | default('postgresql') }}"
      spec:
        ttlSecondsAfterFinished: 300
        template:
          spec:
            restartPolicy: OnFailure
            containers:
              - name: psql
                image: postgres:16-alpine
                command:
                  - /bin/sh
                  - -c
                  - |
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c "
                    DO \$\$
                    BEGIN
                      IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '${DB_USER}') THEN
                        CREATE USER ${DB_USER} WITH PASSWORD '${DB_PASS}';
                      END IF;
                    END \$\$;
                    " &&
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -tc \
                      "SELECT 1 FROM pg_database WHERE datname = '${DB_NAME}'" \
                      | grep -q 1 || \
                    PGPASSWORD="$ADMIN_PASS" psql -h "$HOST" -U postgres -c \
                      "CREATE DATABASE ${DB_NAME} OWNER ${DB_USER};"
                env:
                  - name: HOST
                    value: "{{ postgresql_external_host }}"
                  - name: ADMIN_PASS
                    value: "{{ nextcloud_postgresql_admin_password }}"
                  - name: DB_USER
                    value: "{{ nextcloud_db_username }}"
                  - name: DB_PASS
                    value: "{{ nextcloud_db_password }}"
                  - name: DB_NAME
                    value: "{{ nextcloud_db_name }}"
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  when: addon_postgresql | default(false) | bool

- name: Wait for Nextcloud PostgreSQL provision Job to complete
  ansible.builtin.command: >
    k3s kubectl -n {{ postgresql_namespace | default('postgresql') }}
    wait job/nextcloud-pg-provision
    --for=condition=complete --timeout=120s
  changed_when: false
  when: addon_postgresql | default(false) | bool

- name: Template Nextcloud values
  ansible.builtin.template:
    src: nextcloud-values.yaml.j2
    dest: /tmp/nextcloud-values.yaml
    mode: '0644'

- name: Install Nextcloud via Helm
  kubernetes.core.helm:
    name: nextcloud
    chart_ref: nextcloud/nextcloud
    chart_version: "{{ _nextcloud_chart_version }}"
    release_namespace: "{{ nextcloud_namespace }}"
    create_namespace: true
    wait: true
    timeout: "15m0s"
    values_files:
      - /tmp/nextcloud-values.yaml
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Wait for Nextcloud to be ready
  ansible.builtin.command: >
    k3s kubectl -n {{ nextcloud_namespace }}
    rollout status deployment/nextcloud --timeout=300s
  register: _nextcloud_rollout
  changed_when: false
  # "retries" is silently ignored without "until" on ansible-core < 2.16,
  # so make the retry condition explicit.
  until: _nextcloud_rollout.rc == 0
  retries: 3
  delay: 15

- name: Show Nextcloud access info
  ansible.builtin.debug:
    msg:
      - "Nextcloud установлен в namespace: {{ nextcloud_namespace }}"
      - "URL: http{{ 's' if nextcloud_ingress_tls else '' }}://{{ nextcloud_ingress_host }}"
      - "Логин: {{ nextcloud_admin_username }}"
      - "Пароль: {{ nextcloud_admin_password }}"
      - "БД: {{ 'PostgreSQL ' + postgresql_external_host if addon_postgresql | default(false) | bool else 'встроенный SQLite (только для тестов!)' }}"
      - "Для обновления до новой версии: make addon-nextcloud (nextcloud_version='' → автопоиск)"

View File

@@ -0,0 +1,111 @@
## Nextcloud Helm values — Ansible managed
nextcloud:
  host: "{{ nextcloud_ingress_host }}"
  username: "{{ nextcloud_admin_username }}"
  password: "{{ nextcloud_admin_password }}"
  # Trusted domains and reverse-proxy overrides for the ingress host.
  configs:
    custom.config.php: |-
      <?php
      $CONFIG = array(
        'trusted_domains' => ['{{ nextcloud_ingress_host }}'],
        'overwrite.cli.url' => 'http{{ "s" if nextcloud_ingress_tls else "" }}://{{ nextcloud_ingress_host }}',
        'overwriteprotocol' => '{{ "https" if nextcloud_ingress_tls else "http" }}',
        'default_phone_region' => 'RU',
      );
  # No extra sidecars; background cron runs via the "cronjob" toggle below.
  extraSidecarContainers: []
persistence:
  enabled: true
  accessMode: ReadWriteOnce
  size: "{{ nextcloud_storage_size }}"
{% if nextcloud_storage_class %}
  storageClass: "{{ nextcloud_storage_class }}"
{% endif %}
{% if addon_postgresql | default(false) | bool %}
# Shared PostgreSQL provisioned by the addon role.
internalDatabase:
  enabled: false
externalDatabase:
  enabled: true
  type: postgresql
  host: "{{ postgresql_external_host }}"
  port: {{ postgresql_external_port }}
  user: "{{ nextcloud_db_username }}"
  password: "{{ nextcloud_db_password }}"
  database: "{{ nextcloud_db_name }}"
{% else %}
internalDatabase:
  enabled: true  # SQLite — for tests only, not for prod
externalDatabase:
  enabled: false
{% endif %}
# Bundled Redis — used for file locking and caching.
redis:
  enabled: {{ nextcloud_redis_enabled | lower }}
  auth:
    enabled: false
  master:
    resources:
      requests:
        cpu: 25m
        memory: 32Mi
      limits:
        cpu: 100m
        memory: 64Mi
cronjob:
  enabled: {{ nextcloud_cronjob_enabled | lower }}
resources:
  requests:
    cpu: "{{ nextcloud_resources.requests.cpu }}"
    memory: "{{ nextcloud_resources.requests.memory }}"
  limits:
    cpu: "{{ nextcloud_resources.limits.cpu }}"
    memory: "{{ nextcloud_resources.limits.memory }}"
tolerations:
  - key: "node-role.kubernetes.io/control-plane"
    operator: "Exists"
    effect: "NoSchedule"
ingress:
  enabled: {{ nextcloud_ingress_enabled | lower }}
{% if nextcloud_ingress_enabled %}
  className: "{{ nextcloud_ingress_class }}"
  annotations:
    # Allow large file uploads and long transfers through the ingress.
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
{% if nextcloud_ingress_tls %}
    cert-manager.io/cluster-issuer: "{{ nextcloud_ingress_cert_issuer }}"
{% endif %}
  tls:
{% if nextcloud_ingress_tls %}
    - secretName: nextcloud-tls
      hosts:
        - "{{ nextcloud_ingress_host }}"
{% else %}
    []
{% endif %}
{% endif %}
# nextcloud-exporter metrics sidecar + optional ServiceMonitor.
metrics:
  enabled: {{ nextcloud_metrics_enabled | lower }}
  serviceMonitor:
    enabled: {{ (nextcloud_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool) | lower }}
    additionalLabels:
      release: kube-prometheus-stack
  resources:
    requests:
      cpu: 10m
      memory: 16Mi
    limits:
      cpu: 50m
      memory: 32Mi

View File

@@ -0,0 +1,7 @@
---
# Entry-point playbook for the ownCloud OCIS addon.
# NOTE(review): the play name "Install uowncloud" follows the same u-prefixed
# naming as the sibling addon playbooks — confirm the prefix is intentional.
- name: Install uowncloud
  # Helm/kubectl only need to run once, on the first k3s master.
  hosts: k3s_master[0]
  # No host facts are used by the role; skip gathering for speed.
  gather_facts: false
  become: true
  roles:
    # The role directory lives next to this playbook.
    - role: "{{ playbook_dir }}/role"

View File

@@ -0,0 +1,24 @@
---
# ownCloud Infinite Scale (OCIS) — next-generation cloud storage (Go)
owncloud_version: ""  # "" = automatically use the latest chart version
owncloud_namespace: "owncloud"
owncloud_chart_repo: "https://charts.owncloud.com/stable/"
# Administrator account (user is "admin"; override the password via vault.yml)
owncloud_admin_password: "{{ vault_owncloud_admin_password | default('changeme-owncloud') }}"
# Ingress (mandatory — OCIS requires externalDomain)
owncloud_ingress_enabled: true
owncloud_ingress_host: "cloud.local"
owncloud_ingress_class: "{{ ingress_nginx_class_name | default('nginx') }}"
owncloud_ingress_tls: false
owncloud_ingress_cert_issuer: "{{ cert_manager_default_issuer_name | default('letsencrypt-prod') }}"
# Storage — primary volumes
owncloud_storage_users_size: "20Gi"  # user data
owncloud_storage_system_size: "5Gi"  # system data (metadata, index)
owncloud_storage_class: ""  # "" = cluster default StorageClass
# Metrics (OCIS exposes /metrics on port 9464)
owncloud_metrics_enabled: true
# The ServiceMonitor is created only when addon_prometheus_stack: true

View File

@@ -0,0 +1,78 @@
---
# Installs ownCloud Infinite Scale (OCIS) from the official Helm repository
# and optionally creates a ServiceMonitor for its /metrics endpoint.
- name: Add ownCloud Helm repo
  kubernetes.core.helm_repository:
    name: owncloud
    repo_url: "{{ owncloud_chart_repo }}"
    # Refresh the cached index on every run so the "latest version" lookup
    # below does not return a stale result.
    force_update: true
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Fetch latest ownCloud OCIS chart version
  ansible.builtin.command: helm search repo owncloud/ocis --output json
  register: _owncloud_chart_search
  changed_when: false
  when: owncloud_version == ""
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

- name: Set effective ownCloud chart version
  ansible.builtin.set_fact:
    # helm search matches by substring, so filter for the exact chart name
    # instead of blindly taking the first row of the result.
    _owncloud_chart_version: >-
      {{ owncloud_version if owncloud_version != '' else
         (_owncloud_chart_search.stdout | from_json
          | selectattr('name', 'equalto', 'owncloud/ocis')
          | map(attribute='version') | first) }}

- name: Show ownCloud chart version that will be installed
  ansible.builtin.debug:
    msg: "Устанавливаю ownCloud OCIS chart {{ _owncloud_chart_version }}"

- name: Template ownCloud values
  ansible.builtin.template:
    src: owncloud-values.yaml.j2
    dest: /tmp/owncloud-values.yaml
    mode: '0644'

- name: Install ownCloud OCIS via Helm
  kubernetes.core.helm:
    name: ocis
    chart_ref: owncloud/ocis
    chart_version: "{{ _owncloud_chart_version }}"
    release_namespace: "{{ owncloud_namespace }}"
    create_namespace: true
    wait: true
    timeout: "10m0s"
    values_files:
      - /tmp/owncloud-values.yaml
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"

# The "release: kube-prometheus-stack" label makes Prometheus Operator pick
# this ServiceMonitor up.
- name: Create ownCloud metrics ServiceMonitor
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: monitoring.coreos.com/v1
      kind: ServiceMonitor
      metadata:
        name: owncloud-ocis
        namespace: "{{ owncloud_namespace }}"
        labels:
          release: kube-prometheus-stack
      spec:
        selector:
          matchLabels:
            app.kubernetes.io/name: ocis
        endpoints:
          # NOTE(review): assumes the OCIS Service exposes a port named
          # "metrics" — verify against the deployed chart.
          - port: metrics
            path: /metrics
            interval: 30s
  environment:
    KUBECONFIG: "{{ k3s_kubeconfig_path }}"
  when: owncloud_metrics_enabled | bool and addon_prometheus_stack | default(false) | bool

- name: Show ownCloud access info
  ansible.builtin.debug:
    msg:
      - "ownCloud OCIS установлен в namespace: {{ owncloud_namespace }}"
      - "URL: http{{ 's' if owncloud_ingress_tls else '' }}://{{ owncloud_ingress_host }}"
      - "Логин: admin"
      - "Пароль: {{ owncloud_admin_password }}"
      - "Хранилище пользователей: {{ owncloud_storage_users_size }}"
      - "Для обновления до новой версии: make addon-owncloud (owncloud_version='' → автопоиск)"

View File

@@ -0,0 +1,84 @@
## ownCloud Infinite Scale (OCIS) Helm values — Ansible managed
# Public domain — without the protocol prefix.
externalDomain: "{{ owncloud_ingress_host }}"
# With plain HTTP (no TLS) — allow insecure connections for OIDC and the API.
insecure:
  oidcIdpInsecure: {{ (not owncloud_ingress_tls) | lower }}
  ocisHttpApiInsecure: {{ (not owncloud_ingress_tls) | lower }}
ingress:
  enabled: {{ owncloud_ingress_enabled | lower }}
{% if owncloud_ingress_enabled %}
  ingressClassName: "{{ owncloud_ingress_class }}"
  annotations:
    # Allow large uploads and long transfers through the ingress.
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
{% if owncloud_ingress_tls %}
    cert-manager.io/cluster-issuer: "{{ owncloud_ingress_cert_issuer }}"
  tls:
    - secretName: owncloud-tls
      hosts:
        - "{{ owncloud_ingress_host }}"
{% endif %}
{% endif %}
# Initial administrator password (user: admin).
features:
  basicAuthentication:
    enabled: true
# Password is injected via environment (OCIS_ADMIN_USER_PASSWORD /
# IDM_ADMIN_PASSWORD); the password-policy check is disabled so any
# vault-provided value is accepted.
extraEnv:
  - name: OCIS_ADMIN_USER_PASSWORD
    value: "{{ owncloud_admin_password }}"
  - name: IDM_ADMIN_PASSWORD
    value: "{{ owncloud_admin_password }}"
  - name: OCIS_PASSWORD_POLICY_DISABLE_CHECK
    value: "true"
# Per-service persistence; sizes for nats/search/thumbnails are fixed here.
services:
  storageusers:
    persistence:
      enabled: true
      size: "{{ owncloud_storage_users_size }}"
{% if owncloud_storage_class %}
      storageClass: "{{ owncloud_storage_class }}"
{% endif %}
  storagesystem:
    persistence:
      enabled: true
      size: "{{ owncloud_storage_system_size }}"
{% if owncloud_storage_class %}
      storageClass: "{{ owncloud_storage_class }}"
{% endif %}
  nats:
    persistence:
      enabled: true
      size: "1Gi"
{% if owncloud_storage_class %}
      storageClass: "{{ owncloud_storage_class }}"
{% endif %}
  search:
    persistence:
      enabled: true
      size: "2Gi"
{% if owncloud_storage_class %}
      storageClass: "{{ owncloud_storage_class }}"
{% endif %}
  thumbnails:
    persistence:
      enabled: true
      size: "2Gi"
{% if owncloud_storage_class %}
      storageClass: "{{ owncloud_storage_class }}"
{% endif %}
{% if owncloud_metrics_enabled %}
# Metrics are enabled in OCIS by default on port 9464 — no values needed here;
# the ServiceMonitor is created separately by the role's tasks.
{% endif %}