diff --git a/Makefile b/Makefile
index 8c64e66..220f86a 100644
--- a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ collectors:
 	else \
 	docker run --rm -v $$PWD:/workspace -w /workspace \
 		-e GOOS=linux -e GOARCH=amd64 -e GOCACHE=/workspace/.cache/go-build -e GOMODCACHE=/workspace/.cache/go-mod golang:1.22 \
-		sh -c "go mod tidy >/dev/null 2>&1 && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/uptime ./src/collectors/uptime && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/macos ./src/collectors/macos && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/system ./src/collectors/system && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/hba ./src/collectors/hba"; \
+		sh -c "go mod tidy >/dev/null 2>&1 && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/uptime ./src/collectors/uptime && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/macos ./src/collectors/macos && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/system ./src/collectors/system && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/hba ./src/collectors/hba && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/sensors ./src/collectors/sensors && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/docker ./src/collectors/docker"; \
 	fi
 	@# Make sure the scripts are executable
 	@chmod +x ./bin/agent/collectors/*.sh 2>/dev/null || true
@@ -61,7 +61,7 @@ collectors-linux:
 	# Cross-build collectors for Linux
 	@mkdir -p ./bin/agent/collectors .cache/go-build .cache/go-mod; \
 	docker run --rm -v $$PWD:/workspace -w /workspace -e GOOS=linux -e GOARCH=amd64 -e GOCACHE=/workspace/.cache/go-build -e GOMODCACHE=/workspace/.cache/go-mod golang:1.22 \
-		sh -c "go mod tidy >/dev/null 2>&1 && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/uptime ./src/collectors/uptime && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/macos ./src/collectors/macos && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/system ./src/collectors/system && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/hba ./src/collectors/hba"
+		sh -c "go mod tidy >/dev/null 2>&1 && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/uptime ./src/collectors/uptime && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/macos ./src/collectors/macos && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/system ./src/collectors/system && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/hba ./src/collectors/hba && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/sensors ./src/collectors/sensors && CGO_ENABLED=0 go build -trimpath -o ./bin/agent/collectors/docker ./src/collectors/docker"
 
 collectors-windows:
 	# Cross-build collectors for Windows
@@ -112,26 +112,6 @@ deploy-service: build-linux collectors-linux
 		ansible-playbook -i runner/inventory.ini runner/deploy-service-raw/playbook.yml -e LOCAL_BIN_DIR=/workspace/bin/agent -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
 
 delete-service:
-	# Stop the service and clean up (raw, no Python on the target side)
-	docker run --rm -e ANSIBLE_HOST_KEY_CHECKING=False -v $$PWD:/workspace -v $$HOME/.ssh:/root/.ssh:ro -w /workspace cytopia/ansible:latest-tools \
-		ansible-playbook -i runner/inventory.ini runner/delete-service-raw/playbook.yml -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
-
-deploy-raw: build-linux collectors-linux
-	# Deploy without Python on the target host (raw + scp)
-	docker run --rm -e ANSIBLE_HOST_KEY_CHECKING=False -v $$PWD:/workspace -v $$HOME/.ssh:/root/.ssh:ro -w /workspace cytopia/ansible:latest-tools \
-		ansible-playbook -i runner/inventory.ini runner/deploy-raw/playbook.yml -e LOCAL_BIN_DIR=/workspace/bin/agent -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
-
-delete-raw:
-	# Removal without Python on the target host (raw)
-	docker run --rm -e ANSIBLE_HOST_KEY_CHECKING=False -v $$PWD:/workspace -v $$HOME/.ssh:/root/.ssh:ro -w /workspace cytopia/ansible:latest-tools \
-		ansible-playbook -i runner/inventory.ini runner/delete-raw/playbook.yml -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
-
-deploy-service-raw: build-linux collectors-linux
-	# Deploy and start via systemd without Python on the target side
-	docker run --rm -e ANSIBLE_HOST_KEY_CHECKING=False -v $$PWD:/workspace -v $$HOME/.ssh:/root/.ssh:ro -w /workspace cytopia/ansible:latest-tools \
-		ansible-playbook -i runner/inventory.ini runner/deploy-service-raw/playbook.yml -e LOCAL_BIN_DIR=/workspace/bin/agent -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
-
-delete-service-raw:
 	# Stop and clean up the systemd variant without Python on the target side
 	docker run --rm -e ANSIBLE_HOST_KEY_CHECKING=False -v $$PWD:/workspace -v $$HOME/.ssh:/root/.ssh:ro -w /workspace cytopia/ansible:latest-tools \
 		ansible-playbook -i runner/inventory.ini runner/delete-service-raw/playbook.yml -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
@@ -140,6 +120,7 @@ delete-service-raw:
 		ansible-playbook -i runner/inventory.ini runner/delete-raw/playbook.yml -e ansible_ssh_private_key_file=/root/.ssh/id_rsa -e ansible_become=true -e ansible_become_method=sudo
 
+
 test:
 	# Unit tests in Docker without using the local machine
 	@mkdir -p .cache/go-build .cache/go-mod; \
diff --git a/bin/agent/config.yaml b/bin/agent/config.yaml
index 75f970e..8b25957 100644
--- a/bin/agent/config.yaml
+++ b/bin/agent/config.yaml
@@ -18,7 +18,7 @@ collectors:
     type: exec
     key: system
     interval: "30s"
-    timeout: "8s"
+    timeout: "20s"
     exec: "./collectors/system"
     platforms: [linux]
   uptime:
@@ -53,6 +53,22 @@ collectors:
     timeout: "10s"
     exec: "./collectors/hba"
     platforms: [linux]
+  sensors:
+    enabled: true
+    type: exec
+    key: sensors
+    interval: "30s"
+    timeout: "8s"
+    exec: "./collectors/sensors"
+    platforms: [linux]
+  docker:
+    enabled: true
+    type: exec
+    key: docker
+    interval: "30s"
+    timeout: "20s"
+    exec: "./collectors/docker"
+    platforms: [darwin, linux]
diff --git a/runner/deploy-raw/playbook.yml b/runner/deploy-raw/playbook.yml
index 6860200..4f15826 100644
--- a/runner/deploy-raw/playbook.yml
+++ b/runner/deploy-raw/playbook.yml
@@ -7,39 +7,49 @@
   vars:
     remote_dir: /opt/sensusagent
    local_bin_dir: "{{ LOCAL_BIN_DIR | default('./bin/agent') }}"
+    tmp_upload_dir: "/tmp/sensusagent_upload"
   tasks:
     - name: Ensure remote dir exists
       ansible.builtin.raw: "mkdir -p {{ remote_dir }} && chmod 0755 {{ remote_dir }}"
 
-    - name: Copy agent binary via scp (from controller)
+    - name: Ensure tmp upload dir exists and writable by user
+      ansible.builtin.raw: "mkdir -p {{ tmp_upload_dir }} && chmod 0777 {{ tmp_upload_dir }}"
+
+    - name: Copy agent binary via scp (from controller) to tmp
       ansible.builtin.command: >
         scp -B -i {{ ansible_ssh_private_key_file | default('~/.ssh/id_rsa') }}
         -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
-        {{ local_bin_dir }}/agent {{ ansible_user }}@{{ ansible_host }}:{{ remote_dir }}/agent
+        {{ local_bin_dir }}/agent {{ ansible_user }}@{{ ansible_host }}:{{ tmp_upload_dir }}/agent
       delegate_to: localhost
 
-    - name: Copy config via scp (from controller)
+    - name: Copy config via scp (from controller) to tmp
       ansible.builtin.command: >
         scp -B -i {{ ansible_ssh_private_key_file | default('~/.ssh/id_rsa') }}
        -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
-        {{ local_bin_dir }}/config.yaml {{ ansible_user }}@{{ ansible_host }}:{{ remote_dir }}/config.yaml
+        {{ local_bin_dir }}/config.yaml {{ ansible_user }}@{{ ansible_host }}:{{ tmp_upload_dir }}/config.yaml
       delegate_to: localhost
 
-    - name: Copy collectors directory via scp -r (from controller)
+    - name: Copy collectors directory via scp -r (from controller) to tmp
       ansible.builtin.command: >
         scp -r -B -i {{ ansible_ssh_private_key_file | default('~/.ssh/id_rsa') }}
         -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
-        {{ local_bin_dir }}/collectors {{ ansible_user }}@{{ ansible_host }}:{{ remote_dir }}/
+        {{ local_bin_dir }}/collectors {{ ansible_user }}@{{ ansible_host }}:{{ tmp_upload_dir }}/
       delegate_to: localhost
 
-    - name: Ensure collectors are executable
-      ansible.builtin.raw: "chmod -R 0755 {{ remote_dir }}/collectors 2>/dev/null || true"
+    - name: Move uploaded files into place as root and set permissions
+      ansible.builtin.raw: |
+        install -m 0755 {{ tmp_upload_dir }}/agent {{ remote_dir }}/agent && \
+        install -m 0644 {{ tmp_upload_dir }}/config.yaml {{ remote_dir }}/config.yaml && \
+        rm -rf {{ remote_dir }}/collectors && mkdir -p {{ remote_dir }}/collectors && \
+        cp -a {{ tmp_upload_dir }}/collectors/. {{ remote_dir }}/collectors/ && \
+        chmod -R 0755 {{ remote_dir }}/collectors
+      ignore_errors: no
 
     - name: Optional deps (Debian/Ubuntu) — ignore errors
       ansible.builtin.raw: |
         if [ -f /etc/debian_version ]; then \
           apt-get update -o Acquire::AllowInsecureRepositories=true -o Acquire::https::Verify-Peer=false -o Acquire::https::Verify-Host=false || true; \
-          apt-get install -y --no-install-recommends sysstat iotop smartmontools nvme-cli mdadm lsscsi sg3-utils pciutils || true; \
+          apt-get install -y --no-install-recommends sysstat iotop smartmontools nvme-cli mdadm lsscsi sg3-utils pciutils lm-sensors ipmitool || true; \
           systemctl enable --now sysstat || true; \
         fi
       ignore_errors: yes
@@ -47,7 +57,7 @@
     - name: Optional deps (RHEL/CentOS) — ignore errors
       ansible.builtin.raw: |
         if [ -f /etc/redhat-release ]; then \
-          yum install -y sysstat iotop smartmontools nvme-cli mdadm lsscsi sg3_utils pciutils || true; \
+          yum install -y sysstat iotop smartmontools nvme-cli mdadm lsscsi sg3_utils pciutils lm_sensors ipmitool || true; \
           systemctl enable --now sysstat || true; \
         fi
       ignore_errors: yes
diff --git a/runner/inventory.ini b/runner/inventory.ini
index 5970c6d..bae363b 100644
--- a/runner/inventory.ini
+++ b/runner/inventory.ini
@@ -1,4 +1,4 @@
 [targets]
 # example:
 # server1 ansible_host=1.2.3.4 ansible_user=root
-ansible ansible_host=10.14.246.9 ansible_user=devops
+kube_ansible ansible_host=10.14.246.9 ansible_user=devops
diff --git a/src/collectors/docker/docker_darwin.go b/src/collectors/docker/docker_darwin.go
new file mode 100644
index 0000000..f263413
--- /dev/null
+++ b/src/collectors/docker/docker_darwin.go
@@ -0,0 +1,292 @@
+//go:build darwin
+
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// macOS implementation of the docker collector: uses the docker CLI (Docker Desktop)
+// to gather versions, containers (cpu_pct, mem_bytes), networks and volumes.
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+// collectDocker gathers Docker information on macOS via the docker CLI.
+func collectDocker(ctx context.Context) (map[string]any, error) {
+	res := map[string]any{}
+	// short version strings
+	res["versions"] = versionsDarwin(ctx)
+
+	// containers: minimal fields (id, name, image, cpu_pct, mem_bytes)
+	ps := dockerPSRowsDarwin(ctx)
+	stats := dockerStatsMapDarwin(ctx)
+	containers := []map[string]any{}
+	for _, r := range ps {
+		item := map[string]any{"id": r.ID, "image": r.Image, "name": r.Names}
+		if st, ok := stats[r.ID]; ok {
+			item["cpu_pct"] = parseCPUPercDarwin(st.CPUPerc)
+			item["mem_bytes"] = parseMemUsageBytesDarwin(st.MemUsage)
+		}
+		containers = append(containers, item)
+	}
+	if len(containers) > 0 { res["containers"] = containers }
+
+	// networks: name, id, driver, scope
+	netsLS := dockerJSONDarwin(ctx, "network", "ls")
+	networks := []map[string]any{}
+	for _, m := range netsLS {
+		item := map[string]any{}
+		if v, ok := m["Name"].(string); ok { item["name"] = v }
+		if v, ok := m["ID"].(string); ok { item["id"] = v }
+		if v, ok := m["Driver"].(string); ok { item["driver"] = v }
+		if v, ok := m["Scope"].(string); ok { item["scope"] = v }
+		if len(item) > 0 { networks = append(networks, item) }
+	}
+	if len(networks) > 0 { res["networks"] = networks }
+
+	// volumes: name, driver (no size calculation on macOS)
+	volsLS := dockerJSONDarwin(ctx, "volume", "ls")
+	vols := []map[string]any{}
+	for _, m := range volsLS {
+		item := map[string]any{}
+		if v, ok := m["Name"].(string); ok { item["name"] = v }
+		if v, ok := m["Driver"].(string); ok { item["driver"] = v }
+		if len(item) > 0 { vols = append(vols, item) }
+	}
+	if len(vols) > 0 { res["volumes"] = map[string]any{"items": vols} }
+
+	// images: repository, tag, id, size_bytes plus total size
+	imgsLS := dockerJSONDarwin(ctx, "image", "ls")
+	images := []map[string]any{}
+	var totalImg uint64
+	for _, m := range imgsLS {
+		item := map[string]any{}
+		if v, ok := m["Repository"].(string); ok { item["repository"] = v }
+		if v, ok := m["Tag"].(string); ok { item["tag"] = v }
+		if v, ok := m["ID"].(string); ok { item["id"] = v }
+		if v, ok := m["Size"].(string); ok { sz := humanToBytesDarwin(v); item["size_bytes"] = sz; totalImg += sz }
+		if len(item) > 0 { images = append(images, item) }
+	}
+	if len(images) > 0 { res["images"] = map[string]any{"total_size_bytes": totalImg, "items": images} }
+
+	if len(res) == 0 { return nil, nil }
+	return res, nil
+}
+
+func versionsDarwin(ctx context.Context) map[string]any {
+	v := map[string]any{}
+	if out, err := runDarwin(ctx, "sh", "-c", "docker --version 2>/dev/null"); err == nil { v["docker"] = strings.TrimSpace(out) }
+	if out, err := runDarwin(ctx, "sh", "-c", "docker compose version 2>/dev/null || docker-compose --version 2>/dev/null"); err == nil { v["compose"] = strings.TrimSpace(out) }
+	if out, err := runDarwin(ctx, "sh", "-c", "/usr/local/bin/ctr --version 2>/dev/null || /opt/homebrew/bin/ctr --version 2>/dev/null || containerd --version 2>/dev/null"); err == nil { v["containerd"] = strings.TrimSpace(out) }
+	return v
+}
+
+type dockerPSRow struct {
+	ID    string `json:"ID"`
+	Image string `json:"Image"`
+	Names string `json:"Names"`
+}
+
+func dockerPSRowsDarwin(ctx context.Context) []dockerPSRow {
+	out, err := runDarwin(ctx, "sh", "-c", "docker ps --no-trunc --format '{{json .}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return nil }
+	res := []dockerPSRow{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { continue }
+		var row dockerPSRow
+		_ = json.Unmarshal([]byte(line), &row)
+		res = append(res, row)
+	}
+	return res
+}
+
+func containerUsageDarwin(ctx context.Context, id string) (float64, uint64) {
+	// Use docker stats (no-stream)
+	if out, err := runDarwin(ctx, "sh", "-c", "docker stats --no-stream --format '{{.CPUPerc}} {{.MemUsage}}' "+id+" 2>/dev/null"); err == nil {
+		fields := strings.Fields(strings.TrimSpace(out))
+		if len(fields) >= 2 {
+			cpu := parseCPUPercDarwin(fields[0])
+			mem := parseMemUsageBytesDarwin(strings.Join(fields[1:], " "))
+			return cpu, mem
+		}
+	}
+	return 0, 0
+}
+
+type statRowDarwin struct {
+	ID       string
+	CPUPerc  string
+	MemUsage string
+	NetIO    string
+	BlockIO  string
+	PIDs     string
+}
+
+func dockerStatsMapDarwin(ctx context.Context) map[string]statRowDarwin {
+	out, err := runDarwin(ctx, "sh", "-c", "docker stats --no-stream --format '{{.ID}}|{{.CPUPerc}}|{{.MemUsage}}|{{.NetIO}}|{{.BlockIO}}|{{.PIDs}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]statRowDarwin{} }
+	m := map[string]statRowDarwin{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { continue }
+		parts := strings.Split(line, "|")
+		row := statRowDarwin{}
+		if len(parts) > 0 { row.ID = parts[0] }
+		if len(parts) > 1 { row.CPUPerc = parts[1] }
+		if len(parts) > 2 { row.MemUsage = parts[2] }
+		if len(parts) > 3 { row.NetIO = parts[3] }
+		if len(parts) > 4 { row.BlockIO = parts[4] }
+		if len(parts) > 5 { row.PIDs = parts[5] }
+		if row.ID != "" { m[row.ID] = row }
+	}
+	return m
+}
+
+func idsFromPSDarwin(ps []dockerPSRow) []string {
+	ids := make([]string, 0, len(ps))
+	for _, r := range ps { if r.ID != "" { ids = append(ids, r.ID) } }
+	return ids
+}
+
+func assembleContainersDarwin(ps []dockerPSRow, stats map[string]statRowDarwin, inspects []map[string]any) []map[string]any {
+	idx := map[string]map[string]any{}
+	for _, obj := range inspects {
+		if v, ok := obj["Id"].(string); ok && v != "" { idx[v] = obj }
+	}
+	res := []map[string]any{}
+	for _, r := range ps {
+		item := map[string]any{"id": r.ID, "image": r.Image, "name": r.Names}
+		if st, ok := stats[r.ID]; ok {
+			item["cpu_pct"] = parseCPUPercDarwin(st.CPUPerc)
+			item["mem_bytes"] = parseMemUsageBytesDarwin(st.MemUsage)
+			rx, tx := parsePairBytesDarwin(st.NetIO)
+			rd, wr := parsePairBytesDarwin(st.BlockIO)
+			item["net_rx_bytes"] = rx
+			item["net_tx_bytes"] = tx
+			item["blk_read_bytes"] = rd
+			item["blk_write_bytes"] = wr
+			if p, err := strconv.ParseUint(strings.TrimSpace(st.PIDs), 10, 64); err == nil { item["pids"] = p }
+		}
+		if in, ok := idx[r.ID]; ok {
+			if s, ok2 := in["State"].(map[string]any); ok2 { item["state"] = s }
+			if cfg, ok2 := in["Config"].(map[string]any); ok2 { item["config"] = cfg }
+			if host, ok2 := in["HostConfig"].(map[string]any); ok2 { item["host_config"] = host }
+			if net, ok2 := in["NetworkSettings"].(map[string]any); ok2 { item["network_settings"] = net }
+			if mnts, ok2 := in["Mounts"].([]any); ok2 { item["mounts"] = mnts }
+			if created, ok2 := in["Created"].(string); ok2 { item["created"] = created }
+			if name, ok2 := in["Name"].(string); ok2 && name != "" { item["name"] = strings.TrimPrefix(name, "/") }
+			if imageId, ok2 := in["Image"].(string); ok2 { item["image_id"] = imageId }
+		}
+		res = append(res, item)
+	}
+	return res
+}
+
+func parsePairBytesDarwin(s string) (uint64, uint64) {
+	parts := strings.Split(s, "/")
+	if len(parts) != 2 { return 0, 0 }
+	a := humanToBytesDarwin(strings.TrimSpace(parts[0]))
+	b := humanToBytesDarwin(strings.TrimSpace(parts[1]))
+	return a, b
+}
+
+func dockerInspectDarwin(ctx context.Context, resType string, ids []string) []map[string]any {
+	if len(ids) == 0 { return nil }
+	outAll := []map[string]any{}
+	batch := 30
+	for i := 0; i < len(ids); i += batch {
+		j := i + batch
+		if j > len(ids) { j = len(ids) }
+		out, err := runDarwin(ctx, "sh", "-c", "docker "+resType+" inspect "+strings.Join(ids[i:j], " ")+" 2>/dev/null")
+		if err != nil || strings.TrimSpace(out) == "" { continue }
+		var arr []map[string]any
+		_ = json.Unmarshal([]byte(out), &arr)
+		outAll = append(outAll, arr...)
+	}
+	return outAll
+}
+
+func namesOrIDsDarwin(list []map[string]any) []string {
+	res := []string{}
+	for _, m := range list {
+		if v, ok := m["Name"].(string); ok && v != "" { res = append(res, v); continue }
+		if v, ok := m["ID"].(string); ok && v != "" { res = append(res, v); continue }
+	}
+	return res
+}
+
+func dockerInfoDarwin(ctx context.Context) map[string]any {
+	out, err := runDarwin(ctx, "sh", "-c", "docker info --format '{{json .}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]any{} }
+	m := map[string]any{}
+	_ = json.Unmarshal([]byte(out), &m)
+	return m
+}
+
+func dockerVersionDetailDarwin(ctx context.Context) map[string]any {
+	out, err := runDarwin(ctx, "sh", "-c", "docker version --format '{{json .}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]any{} }
+	m := map[string]any{}
+	_ = json.Unmarshal([]byte(out), &m)
+	return m
+}
+
+// Helpers for parsing and running commands on macOS
+func runDarwin(ctx context.Context, bin string, args ...string) (string, error) {
+	b, err := exec.CommandContext(ctx, bin, args...).Output()
+	if err != nil { return "", err }
+	return string(b), nil
+}
+
+func dockerJSONDarwin(ctx context.Context, resource string, action string) []map[string]any {
+	cmd := "docker " + resource + " " + action + " --format '{{json .}}' 2>/dev/null"
+	out, err := runDarwin(ctx, "sh", "-c", cmd)
+	if err != nil || strings.TrimSpace(out) == "" { return nil }
+	res := []map[string]any{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { continue }
+		m := map[string]any{}
+		_ = json.Unmarshal([]byte(line), &m)
+		res = append(res, m)
+	}
+	return res
+}
+
+func parseCPUPercDarwin(s string) float64 {
+	s = strings.TrimSpace(strings.TrimSuffix(s, "%"))
+	if s == "" { return 0 }
+	f, _ := strconv.ParseFloat(s, 64)
+	return f
+}
+
+func parseMemUsageBytesDarwin(s string) uint64 {
+	left := s
+	if i := strings.Index(s, "/"); i > 0 { left = s[:i] }
+	left = strings.TrimSpace(left)
+	return humanToBytesDarwin(left)
+}
+
+func humanToBytesDarwin(s string) uint64 {
+	s = strings.TrimSpace(strings.ToUpper(s))
+	s = strings.ReplaceAll(s, " ", "")
+	mult := float64(1)
+	for _, suf := range []struct{ K string; M float64 }{
+		{"KIB", 1024}, {"MIB", 1024 * 1024}, {"GIB", 1024 * 1024 * 1024},
+		{"KB", 1000}, {"MB", 1000 * 1000}, {"GB", 1000 * 1000 * 1000},
+	} {
+		if strings.HasSuffix(s, suf.K) { mult = suf.M; s = strings.TrimSuffix(s, suf.K); break }
+	}
+	f, _ := strconv.ParseFloat(s, 64)
+	return uint64(f * mult)
+}
+
diff --git a/src/collectors/docker/docker_linux.go b/src/collectors/docker/docker_linux.go
new file mode 100644
index 0000000..b1bd4ef
--- /dev/null
+++ b/src/collectors/docker/docker_linux.go
@@ -0,0 +1,361 @@
+//go:build linux
+
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// Linux implementation of the docker collector: gathers docker/compose/containerd versions
+// and lists containers (name, id, image, cpu_pct, mem_bytes), networks and volumes.
+// cpu_pct/mem_bytes and network/disk metrics come from the docker CLI and /sys/fs/cgroup.
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+func collectDocker(ctx context.Context) (map[string]any, error) {
+	res := map[string]any{}
+	// short version strings
+	res["versions"] = versions(ctx)
+
+	// containers: minimal fields plus basic stats (id, name, image, cpu_pct, mem_bytes, net/blk IO, pids)
+	psRows := dockerPSRows(ctx)
+	stats := dockerStatsMap(ctx)
+	containers := []map[string]any{}
+	for _, r := range psRows {
+		item := map[string]any{"id": r.ID, "image": r.Image, "name": r.Names}
+		if st, ok := stats[r.ID]; ok {
+			item["cpu_pct"] = parseCPUPerc(st.CPUPerc)
+			item["mem_bytes"] = parseMemUsageBytes(st.MemUsage)
+			rx, tx := parsePairBytes(st.NetIO)
+			rd, wr := parsePairBytes(st.BlockIO)
+			item["net_rx_bytes"] = rx
+			item["net_tx_bytes"] = tx
+			item["blk_read_bytes"] = rd
+			item["blk_write_bytes"] = wr
+			if p, err := strconv.ParseUint(strings.TrimSpace(st.PIDs), 10, 64); err == nil { item["pids"] = p }
+		}
+		containers = append(containers, item)
+	}
+	if len(containers) > 0 { res["containers"] = containers }
+
+	// networks: name, id, driver, scope
+	netsLS := dockerJSON(ctx, "network", "ls")
+	networks := []map[string]any{}
+	for _, m := range netsLS {
+		item := map[string]any{}
+		if v, ok := m["Name"].(string); ok { item["name"] = v }
+		if v, ok := m["ID"].(string); ok { item["id"] = v }
+		if v, ok := m["Driver"].(string); ok { item["driver"] = v }
+		if v, ok := m["Scope"].(string); ok { item["scope"] = v }
+		if len(item) > 0 { networks = append(networks, item) }
+	}
+	if len(networks) > 0 { res["networks"] = networks }
+
+	// volumes: name, driver, size_bytes plus total_size_bytes
+	volsLS := dockerJSON(ctx, "volume", "ls")
+	vols := []map[string]any{}
+	for _, m := range volsLS {
+		item := map[string]any{}
+		if v, ok := m["Name"].(string); ok { item["name"] = v }
+		if v, ok := m["Driver"].(string); ok { item["driver"] = v }
+		if len(item) > 0 { vols = append(vols, item) }
+	}
+	sizes, total := volumeSizes(ctx)
+	for i := range vols {
+		name, _ := vols[i]["name"].(string)
+		vols[i]["size_bytes"] = sizes[name]
+	}
+	if len(vols) > 0 {
+		res["volumes"] = map[string]any{
+			"total_size_bytes": total,
+			"items":            vols,
+		}
+	}
+
+	// images: repository, tag, id, size_bytes plus total size
+	imgsLS := dockerJSON(ctx, "image", "ls")
+	images := []map[string]any{}
+	var totalImg uint64
+	for _, m := range imgsLS {
+		item := map[string]any{}
+		if v, ok := m["Repository"].(string); ok { item["repository"] = v }
+		if v, ok := m["Tag"].(string); ok { item["tag"] = v }
+		if v, ok := m["ID"].(string); ok { item["id"] = v }
+		if v, ok := m["Size"].(string); ok { sz := humanToBytes(v); item["size_bytes"] = sz; totalImg += sz }
+		if len(item) > 0 { images = append(images, item) }
+	}
+	if len(images) > 0 { res["images"] = map[string]any{"total_size_bytes": totalImg, "items": images} }
+
+	if len(res) == 0 { return nil, nil }
+	return res, nil
+}
+
+func versions(ctx context.Context) map[string]any {
+	v := map[string]any{}
+	if out, err := run(ctx, "sh", "-c", "docker --version 2>/dev/null"); err == nil { v["docker"] = strings.TrimSpace(out) }
+	if out, err := run(ctx, "sh", "-c", "docker compose version 2>/dev/null || docker-compose --version 2>/dev/null"); err == nil { v["compose"] = strings.TrimSpace(out) }
+	if out, err := run(ctx, "sh", "-c", "containerd --version 2>/dev/null"); err == nil { v["containerd"] = strings.TrimSpace(out) }
+	return v
+}
+
+type dockerPS struct {
+	ID    string `json:"ID"`
+	Image string `json:"Image"`
+	Names string `json:"Names"`
+}
+
+func dockerPSRows(ctx context.Context) []dockerPS {
+	out, err := run(ctx, "sh", "-c", "docker ps --no-trunc --format '{{json .}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return nil }
+	res := []dockerPS{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { continue }
+		var row dockerPS
+		_ = json.Unmarshal([]byte(line), &row)
+		res = append(res, row)
+	}
+	return res
+}
+
+func idsFromPS(ps []dockerPS) []string {
+	ids := make([]string, 0, len(ps))
+	for _, r := range ps { if r.ID != "" { ids = append(ids, r.ID) } }
+	return ids
+}
+
+type statRow struct {
+	ID       string
+	CPUPerc  string
+	MemUsage string
+	NetIO    string
+	BlockIO  string
+	PIDs     string
+}
+
+func dockerStatsMap(ctx context.Context) map[string]statRow {
+	out, err := run(ctx, "sh", "-c", "docker stats --no-stream --format '{{.ID}}|{{.CPUPerc}}|{{.MemUsage}}|{{.NetIO}}|{{.BlockIO}}|{{.PIDs}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]statRow{} }
+	m := map[string]statRow{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { continue }
+		parts := strings.Split(line, "|")
+		row := statRow{}
+		if len(parts) > 0 { row.ID = parts[0] }
+		if len(parts) > 1 { row.CPUPerc = parts[1] }
+		if len(parts) > 2 { row.MemUsage = parts[2] }
+		if len(parts) > 3 { row.NetIO = parts[3] }
+		if len(parts) > 4 { row.BlockIO = parts[4] }
+		if len(parts) > 5 { row.PIDs = parts[5] }
+		if row.ID != "" { m[row.ID] = row }
+	}
+	return m
+}
+
+func assembleContainers(ps []dockerPS, stats map[string]statRow, inspects []map[string]any) []map[string]any {
+	// index inspect results by Id
+	idx := map[string]map[string]any{}
+	for _, obj := range inspects {
+		if v, ok := obj["Id"].(string); ok && v != "" { idx[v] = obj }
+	}
+	res := []map[string]any{}
+	for _, r := range ps {
+		item := map[string]any{"id": r.ID, "image": r.Image, "name": r.Names}
+		if st, ok := stats[r.ID]; ok {
+			item["cpu_pct"] = parseCPUPerc(st.CPUPerc)
+			item["mem_bytes"] = parseMemUsageBytes(st.MemUsage)
+			rx, tx := parsePairBytes(st.NetIO)
+			rd, wr := parsePairBytes(st.BlockIO)
+			item["net_rx_bytes"] = rx
+			item["net_tx_bytes"] = tx
+			item["blk_read_bytes"] = rd
+			item["blk_write_bytes"] = wr
+			if p, err := strconv.ParseUint(strings.TrimSpace(st.PIDs), 10, 64); err == nil { item["pids"] = p }
+		}
+		if in, ok := idx[r.ID]; ok {
+			if s, ok2 := in["State"].(map[string]any); ok2 { item["state"] = s }
+			if cfg, ok2 := in["Config"].(map[string]any); ok2 { item["config"] = cfg }
+			if host, ok2 := in["HostConfig"].(map[string]any); ok2 { item["host_config"] = host }
+			if net, ok2 := in["NetworkSettings"].(map[string]any); ok2 { item["network_settings"] = net }
+			if mnts, ok2 := in["Mounts"].([]any); ok2 { item["mounts"] = mnts }
+			if created, ok2 := in["Created"].(string); ok2 { item["created"] = created }
+			if name, ok2 := in["Name"].(string); ok2 && name != "" { item["name"] = strings.TrimPrefix(name, "/") }
+			if imageId, ok2 := in["Image"].(string); ok2 { item["image_id"] = imageId }
+		}
+		res = append(res, item)
+	}
+	return res
+}
+
+func parsePairBytes(s string) (uint64, uint64) {
+	// Format like "123kB / 456kB"
+	parts := strings.Split(s, "/")
+	if len(parts) != 2 { return 0, 0 }
+	a := humanToBytes(strings.TrimSpace(parts[0]))
+	b := humanToBytes(strings.TrimSpace(parts[1]))
+	return a, b
+}
+
+func containerUsage(ctx context.Context, id string) (float64, uint64) {
+	// Try docker stats first (single sample)
+	if out, err := run(ctx, "sh", "-c", "docker stats --no-stream --format '{{.CPUPerc}} {{.MemUsage}}' "+id+" 2>/dev/null"); err == nil {
+		fields := strings.Fields(strings.TrimSpace(out))
+		if len(fields) >= 2 {
+			cpu := parseCPUPerc(fields[0])
+			mem := parseMemUsageBytes(strings.Join(fields[1:], " "))
+			return cpu, mem
+		}
+	}
+	// Fallback: cgroup memory.current (cgroups v2) or memory.usage_in_bytes (v1)
+	mem := readFirstExistingUint([]string{
+		filepath.Join("/sys/fs/cgroup/docker", id, "memory.current"),
+		filepath.Join("/sys/fs/cgroup/memory/docker", id, "memory.usage_in_bytes"),
+	})
+	return 0, mem
+}
+
+func parseCPUPerc(s string) float64 {
+	s = strings.TrimSpace(strings.TrimSuffix(s, "%"))
+	if s == "" { return 0 }
+	f, _ := strconv.ParseFloat(s, 64)
+	return f
+}
+
+func parseMemUsageBytes(s string) uint64 {
+	// Formats like "123.4MiB/2GiB": take the left part before '/'
+	left := s
+	if i := strings.Index(s, "/"); i > 0 { left = s[:i] }
+	left = strings.TrimSpace(left)
+	return humanToBytes(left)
+}
+
+func humanToBytes(s string) uint64 {
+	s = strings.TrimSpace(strings.ToUpper(s))
+	// Strip the space between the number and the suffix
+	s = strings.ReplaceAll(s, " ", "")
+	mult := float64(1)
+	for _, suf := range []struct{ K string; M float64 }{
+		{"KIB", 1024}, {"MIB", 1024 * 1024}, {"GIB", 1024 * 1024 * 1024},
+		{"KB", 1000}, {"MB", 1000 * 1000}, {"GB", 1000 * 1000 * 1000},
+	} {
+		if strings.HasSuffix(s, suf.K) { mult = suf.M; s = strings.TrimSuffix(s, suf.K); break }
+	}
+	f, _ := strconv.ParseFloat(s, 64)
+	return uint64(f * mult)
+}
+
+func readFirstExistingUint(paths []string) uint64 {
+	for _, p := range paths {
+		b, err := os.ReadFile(p)
+		if err == nil && len(b) > 0 {
+			v, _ := strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
+			return v
+		}
+	}
+	return 0
+}
+
+func dockerJSON(ctx context.Context, resource string, action string) []map[string]any {
+	cmd := "docker " + resource + " " + action + " --format '{{json .}}' 2>/dev/null"
+	out, err := run(ctx, "sh", "-c", cmd)
+	if err != nil || strings.TrimSpace(out) == "" { return nil }
+	res := []map[string]any{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { continue }
+		m := map[string]any{}
+		_ = json.Unmarshal([]byte(line), &m)
+		res = append(res, m)
+	}
+	return res
+}
+
+// dockerInspect returns the combined array of docker ... inspect results
+func dockerInspect(ctx context.Context, resType string, ids []string) []map[string]any {
+	if len(ids) == 0 { return nil }
+	// Process in batches so the command-line length limit is not exceeded
+	outAll := []map[string]any{}
+	batch := 30
+	for i := 0; i < len(ids); i += batch {
+		j := i + batch
+		if j > len(ids) { j = len(ids) }
+		out, err := run(ctx, "sh", "-c", "docker "+resType+" inspect "+strings.Join(ids[i:j], " ")+" 2>/dev/null")
+		if err != nil || strings.TrimSpace(out) == "" { continue }
+		var arr []map[string]any
+		_ = json.Unmarshal([]byte(out), &arr)
+		outAll = append(outAll, arr...)
+	}
+	return outAll
+}
+
+func namesOrIDs(list []map[string]any) []string {
+	res := []string{}
+	for _, m := range list {
+		if v, ok := m["Name"].(string); ok && v != "" { res = append(res, v); continue }
+		if v, ok := m["ID"].(string); ok && v != "" { res = append(res, v); continue }
+	}
+	return res
+}
+
+func dockerInfo(ctx context.Context) map[string]any {
+	out, err := run(ctx, "sh", "-c", "docker info --format '{{json .}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]any{} }
+	m := map[string]any{}
+	_ = json.Unmarshal([]byte(out), &m)
+	return m
+}
+
+func dockerVersionDetail(ctx context.Context) map[string]any {
+	out, err := run(ctx, "sh", "-c", "docker version --format '{{json .}}' 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]any{} }
+	m := map[string]any{}
+	_ = json.Unmarshal([]byte(out), &m)
+	return m
+}
+
+func run(ctx context.Context, bin string, args ...string) (string, error) {
+	cmd := exec.CommandContext(ctx, bin, args...)
+	b, err := cmd.Output()
+	if err != nil { return "", err }
+	return string(b), nil
+}
+
+// volumeSizes parses "docker system df -v" output and returns per-volume sizes plus the total size
+func volumeSizes(ctx context.Context) (map[string]uint64, uint64) {
+	out, err := run(ctx, "sh", "-c", "docker system df -v 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]uint64{}, 0 }
+	sizes := map[string]uint64{}
+	var inVolumes bool
+	total := uint64(0)
+	s := bufio.NewScanner(strings.NewReader(out))
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+		if line == "" { if inVolumes { break }; continue }
+		low := strings.ToLower(line)
+		if strings.HasPrefix(low, "local volumes space usage") { inVolumes = true; continue }
+		if inVolumes {
+			if strings.HasPrefix(low, "images space usage") || strings.HasPrefix(low, "containers space usage") { break }
+			fields := strings.Fields(line)
+			if len(fields) < 2 { continue }
+			name := fields[0]
+			sizeStr := fields[len(fields)-1]
+			sz := humanToBytes(sizeStr)
+			sizes[name] = sz
+			total += sz
+		}
+	}
+	return sizes, total
+}
+
diff --git a/src/collectors/docker/docker_unsupported.go b/src/collectors/docker/docker_unsupported.go
new file mode 100644
index 0000000..535194a
--- /dev/null
+++ b/src/collectors/docker/docker_unsupported.go
@@ -0,0 +1,16 @@
+//go:build !linux && !darwin
+
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// Stub: on unsupported OSes return empty JSON. The constraint excludes linux and
+// darwin, which provide their own collectDocker implementations.
+
+import (
+	"context"
+)
+
+func collectDocker(ctx context.Context) (map[string]any, error) {
+	return nil, nil
+}
+
diff --git a/src/collectors/docker/main.go b/src/collectors/docker/main.go
new file mode 100644
index 0000000..ba13fe1
--- /dev/null
+++ b/src/collectors/docker/main.go
@@ -0,0 +1,41 @@
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// Docker collector. Gathers information about containers, networks, volumes and versions.
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+)
+
+// collectDocker is implemented per platform.
+
+func main() {
+	// The timeout can be overridden via the COLLECTOR_TIMEOUT environment variable
+	timeout := parseDurationOr("COLLECTOR_TIMEOUT", 8*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	data, err := collectDocker(ctx)
+	if err != nil || data == nil {
+		fmt.Println("{}")
+		return
+	}
+	enc := json.NewEncoder(os.Stdout)
+	enc.SetEscapeHTML(false)
+	_ = enc.Encode(data)
+}
+
+func parseDurationOr(env string, def time.Duration) time.Duration {
+	v := strings.TrimSpace(os.Getenv(env))
+	if v == "" { return def }
+	d, err := time.ParseDuration(v)
+	if err != nil { return def }
+	return d
+}
+
diff --git a/src/collectors/sensors/main.go b/src/collectors/sensors/main.go
new file mode 100644
index 0000000..80ec613
--- /dev/null
+++ b/src/collectors/sensors/main.go
@@ -0,0 +1,43 @@
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// Sensors collector. Gathers data from lm-sensors and IPMI (when available).
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+)
+
+// collectSensors is implemented per platform.
+
+func main() {
+	// The timeout can be overridden via the COLLECTOR_TIMEOUT environment variable
+	timeout := parseDurationOr("COLLECTOR_TIMEOUT", 8*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	data, err := collectSensors(ctx)
+	if err != nil || data == nil {
+		fmt.Println("{}")
+		return
+	}
+	enc := json.NewEncoder(os.Stdout)
+	enc.SetEscapeHTML(false)
+	_ = enc.Encode(data)
+}
+
+func parseDurationOr(env string, def time.Duration) time.Duration {
+	v := strings.TrimSpace(os.Getenv(env))
+	if v == "" {
+		return def
+	}
+	d, err := time.ParseDuration(v)
+	if err != nil {
+		return def
+	}
+	return d
+}
diff --git a/src/collectors/sensors/sensors_linux.go b/src/collectors/sensors/sensors_linux.go
new file mode 100644
index 0000000..e276532
--- /dev/null
+++ b/src/collectors/sensors/sensors_linux.go
@@ -0,0 +1,86 @@
+//go:build linux
+
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// Linux implementation of sensors: lm-sensors plus IPMI (ipmitool) when present.
+
+import (
+	"bufio"
+	"context"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// collectSensors gathers summary information on temperatures/fans/power and chassis status.
+func collectSensors(ctx context.Context) (map[string]any, error) {
+	res := map[string]any{}
+	if lm := collectLmSensors(ctx); len(lm) > 0 { res["lm_sensors"] = lm }
+	if ipmi := collectIPMI(ctx); len(ipmi) > 0 { res["ipmi"] = ipmi }
+	if len(res) == 0 { return nil, nil }
+	return res, nil
+}
+
+// collectLmSensors parses sensors (lm-sensors) output: CPU temperatures, fan RPM, PSU status
+func collectLmSensors(ctx context.Context) map[string]any {
+	out, err := run(ctx, "sh", "-c", "LC_ALL=C sensors 2>/dev/null")
+	if err != nil || strings.TrimSpace(out) == "" { return map[string]any{} }
+	res := map[string]any{}
+	cpuTemps := []float64{}
+	fans := []map[string]any{}
+	psu := []string{}
+	s := bufio.NewScanner(strings.NewReader(out))
+	reTemp := regexp.MustCompile(`(?i)(temp(?:\s*\d+)?):\s*\+?(-?\d+\.?\d*)°C`)
+	reFan := regexp.MustCompile(`(?i)(fan(?:\s*\d+)?):\s*(\d+)\s*RPM`)
+	for s.Scan() {
+		line := s.Text()
+		if m := reTemp.FindStringSubmatch(line); len(m) == 3 {
+			if v, err := strconv.ParseFloat(m[2], 64); err == nil {
+				cpuTemps = append(cpuTemps, v)
+			}
+			continue
+		}
+		if m := reFan.FindStringSubmatch(line); len(m) == 3 {
+			if rpm, err := strconv.Atoi(m[2]); err == nil {
+				fans = append(fans, map[string]any{"name": strings.TrimSpace(m[1]), "rpm": rpm})
+			}
+			continue
+		}
+		if strings.Contains(strings.ToLower(line), "psu") && (strings.Contains(strings.ToLower(line), "ok") || strings.Contains(strings.ToLower(line), "present")) {
+			psu = append(psu, strings.TrimSpace(line))
+		}
+	}
+	if len(cpuTemps) > 0 { res["cpu_temps_c"] = cpuTemps }
+	if len(fans) > 0 { res["fans"] = fans }
+	if len(psu) > 0 { res["psu_status"] = psu }
+	return res
+}
+
+// collectIPMI gathers basic IPMI information: power, PSU redundancy, chassis warnings
+func collectIPMI(ctx context.Context) map[string]any {
+	// Check for ipmitool and BMC access
+	if _, err := exec.LookPath("ipmitool"); err != nil { return map[string]any{} }
+	res := map[string]any{}
+	if out, err := run(ctx, "sh", "-c", "ipmitool chassis status 2>/dev/null"); err == nil && strings.TrimSpace(out) != "" {
+		res["chassis_status"] = out
+	}
+	if out, err := run(ctx, "sh", "-c", "ipmitool sdr elist all 2>/dev/null"); err == nil && strings.TrimSpace(out) != "" {
+		// Could be parsed further into per-sensor key-value pairs
+		res["sdr"] = out
+	}
+	if out, err := run(ctx, "sh", "-c", "ipmitool sensor 2>/dev/null"); err == nil && strings.TrimSpace(out) != "" {
+		res["sensor"] = out
+	}
+	return res
+}
+
+func run(ctx context.Context, bin string, args ...string) (string, error) {
+	cmd := exec.CommandContext(ctx, bin, args...)
+	b, err := cmd.Output()
+	if err != nil { return "", err }
+	return string(b), nil
+}
+
diff --git a/src/collectors/sensors/sensors_unsupported.go b/src/collectors/sensors/sensors_unsupported.go
new file mode 100644
index 0000000..41a14eb
--- /dev/null
+++ b/src/collectors/sensors/sensors_unsupported.go
@@ -0,0 +1,16 @@
+//go:build !linux
+
+package main
+
+// Author: Sergey Antropov, site: https://devops.org.ru
+// Stub for unsupported OSes: return empty JSON.
+
+import (
+	"context"
+)
+
+func collectSensors(ctx context.Context) (map[string]any, error) {
+	return nil, nil
+}
+
diff --git a/src/core/execcollectors/exec.go b/src/core/execcollectors/exec.go
index ca3209f..c59cfa7 100644
--- a/src/core/execcollectors/exec.go
+++ b/src/core/execcollectors/exec.go
@@ -66,6 +66,8 @@ func (e *execCollector) Collect(ctx context.Context) (collector.Result, error) {
 		}
 	}
 	cmd := osExec.CommandContext(ctx, cmdStr)
+	// Pass the timeout to the child process so the collector can trim its own workload
+	cmd.Env = append(os.Environ(), fmt.Sprintf("COLLECTOR_TIMEOUT=%s", e.timeout.String()))
 	out, err := cmd.Output()
 	if err != nil {
 		return nil, fmt.Errorf("exec failed: %w", err)