- Упрощена функция collectProxCluster для точного соответствия требуемой структуре - Убраны лишние поля из summary, оставлены только: cluster_id, cluster_uuid, name, version, cluster_resources, quorum, corosync - Улучшена обработка ошибок - Структура вывода теперь точно соответствует спецификации Автор: Сергей Антропов, сайт: https://devops.org.ru
1530 lines
43 KiB
Go
1530 lines
43 KiB
Go
//go:build linux
|
||
|
||
package main
|
||
|
||
// Автор: Сергей Антропов, сайт: https://devops.org.ru
|
||
// Сбор информации о Proxmox кластере для Linux.
|
||
|
||
import (
	"bufio"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"
)
|
||
|
||
// collectProxCluster собирает подробную информацию о Proxmox кластере:
|
||
// Структура вывода:
|
||
// 1. summary - вся информация по кластеру
|
||
// 2. nodes - вся информация по нодам
|
||
// Примечание: services, storages, logs, gpu, disks, network, vms вынесены в отдельные коллекторы
|
||
func collectProxCluster(ctx context.Context) (map[string]any, error) {
|
||
// Основная информация о кластере из corosync.conf
|
||
clusterInfo, err := collectClusterInfo(ctx)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
|
||
// Получаем данные для агрегированных ресурсов
|
||
clusterUUID := ""
|
||
clusterName := ""
|
||
if uuid, ok := clusterInfo["cluster_uuid"].(string); ok {
|
||
clusterUUID = uuid
|
||
}
|
||
if name, ok := clusterInfo["name"].(string); ok {
|
||
clusterName = name
|
||
}
|
||
|
||
// Собираем информацию о нодах
|
||
nodesInfo, err := collectDetailedNodesInfo(ctx, clusterName, clusterUUID)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
|
||
// Создаем блок summary с точной структурой
|
||
summary := map[string]any{
|
||
"cluster_id": clusterInfo["cluster_id"],
|
||
"cluster_uuid": clusterInfo["cluster_uuid"],
|
||
"name": clusterInfo["name"],
|
||
"version": clusterInfo["version"],
|
||
}
|
||
|
||
// Агрегированная информация о ресурсах кластера
|
||
if nodesInfo != nil {
|
||
clusterResources, err := calculateClusterResources(nodesInfo, nil)
|
||
if err == nil {
|
||
summary["cluster_resources"] = clusterResources
|
||
}
|
||
}
|
||
|
||
// Информация о кворуме
|
||
quorumInfo, err := collectQuorumInfo(ctx)
|
||
if err == nil {
|
||
summary["quorum"] = quorumInfo
|
||
}
|
||
|
||
// Информация о corosync
|
||
corosyncInfo, err := collectCorosyncInfo(ctx)
|
||
if err == nil {
|
||
summary["corosync"] = corosyncInfo
|
||
}
|
||
|
||
// Формируем финальный результат с точной структурой
|
||
result := map[string]any{
|
||
"collector_name": "proxcluster",
|
||
"summary": summary,
|
||
"nodes": nodesInfo,
|
||
}
|
||
|
||
return result, nil
|
||
}
|
||
|
||
// collectClusterInfo читает основную информацию о кластере из corosync.conf и pvesh
|
||
func collectClusterInfo(ctx context.Context) (map[string]any, error) {
|
||
result := map[string]any{}
|
||
|
||
// Читаем corosync.conf
|
||
corosyncPath := "/etc/corosync/corosync.conf"
|
||
if _, err := os.Stat(corosyncPath); os.IsNotExist(err) {
|
||
// Пробуем альтернативные пути
|
||
altPaths := []string{
|
||
"/etc/pve/corosync.conf",
|
||
"/var/lib/pve-cluster/corosync.conf",
|
||
}
|
||
for _, path := range altPaths {
|
||
if _, err := os.Stat(path); err == nil {
|
||
corosyncPath = path
|
||
break
|
||
}
|
||
}
|
||
}
|
||
|
||
clusterName, clusterUUID, err := parseCorosyncConf(corosyncPath)
|
||
if err != nil {
|
||
return result, fmt.Errorf("failed to parse corosync.conf: %w", err)
|
||
}
|
||
|
||
result["name"] = clusterName
|
||
result["cluster_uuid"] = clusterUUID
|
||
result["cluster_id"] = generateClusterID(clusterName, clusterUUID)
|
||
|
||
// Версия кластера
|
||
if version, err := getClusterVersion(ctx); err == nil {
|
||
result["version"] = version
|
||
}
|
||
|
||
// Дополнительная информация о кластере через pvesh
|
||
if pveshInfo, err := getClusterInfoFromPvesh(ctx); err == nil {
|
||
for k, v := range pveshInfo {
|
||
result[k] = v
|
||
}
|
||
}
|
||
|
||
return result, nil
|
||
}
|
||
|
||
// parseCorosyncConf parses corosync.conf and extracts cluster_name and
// cluster_uuid. cluster_name is mandatory; cluster_uuid may be empty.
func parseCorosyncConf(path string) (string, string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", "", err
	}
	defer f.Close()

	var name, uuid string
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())

		// Skip comments and blank lines.
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		// Entries look like "key: value"; split at the first colon only.
		key, value, found := strings.Cut(line, ":")
		if !found {
			continue
		}
		switch key {
		case "cluster_name":
			name = strings.TrimSpace(value)
		case "cluster_uuid":
			uuid = strings.TrimSpace(value)
		}
	}
	if err := sc.Err(); err != nil {
		return "", "", err
	}

	if name == "" {
		return "", "", errors.New("cluster_name not found in corosync.conf")
	}

	// An empty cluster_uuid is acceptable.
	return name, uuid, nil
}
|
||
|
||
// generateClusterID derives a stable 16-character cluster identifier from
// cluster_name + cluster_uuid using SHA-256.
func generateClusterID(clusterName, clusterUUID string) string {
	sum := sha256.Sum256([]byte(clusterName + ":" + clusterUUID))
	return hex.EncodeToString(sum[:])[:16]
}
|
||
|
||
|
||
// getClusterVersion returns the Proxmox cluster version. It prefers the
// pve-manager line of "pveversion -v" and falls back to the static
// version file shipped by pve-manager.
func getClusterVersion(ctx context.Context) (string, error) {
	// Preferred source: pveversion.
	if _, err := exec.LookPath("pveversion"); err == nil {
		if out, err := exec.CommandContext(ctx, "pveversion", "-v").Output(); err == nil {
			for _, line := range strings.Split(string(out), "\n") {
				if !strings.Contains(line, "pve-manager") {
					continue
				}
				if fields := strings.Fields(line); len(fields) >= 2 {
					return fields[1], nil
				}
			}
		}
	}

	// Fallback: static version file.
	if data, err := os.ReadFile("/usr/share/pve-manager/version"); err == nil {
		return strings.TrimSpace(string(data)), nil
	}

	return "", errors.New("cluster version not found")
}
|
||
|
||
// getClusterInfoFromPvesh получает дополнительную информацию о кластере через pvesh
|
||
func getClusterInfoFromPvesh(ctx context.Context) (map[string]any, error) {
|
||
result := map[string]any{}
|
||
|
||
// Проверяем наличие pvesh
|
||
if _, err := exec.LookPath("pvesh"); err != nil {
|
||
return result, fmt.Errorf("pvesh not found: %w", err)
|
||
}
|
||
|
||
// Список endpoints для сбора информации о кластере (очищенный от лишних данных)
|
||
clusterEndpoints := []string{
|
||
"/cluster/status",
|
||
"/cluster/config/corosync",
|
||
"/cluster/tasks",
|
||
"/cluster/config/apiclient",
|
||
}
|
||
|
||
// Собираем данные со всех endpoints
|
||
for _, endpoint := range clusterEndpoints {
|
||
cmd := exec.CommandContext(ctx, "pvesh", "get", endpoint, "--output-format", "json")
|
||
out, err := cmd.Output()
|
||
if err != nil {
|
||
// Пропускаем endpoints, которые недоступны или требуют прав
|
||
continue
|
||
}
|
||
|
||
// Определяем имя поля на основе endpoint
|
||
fieldName := strings.ReplaceAll(endpoint, "/cluster/", "cluster_")
|
||
fieldName = strings.ReplaceAll(fieldName, "/", "_")
|
||
|
||
// Парсим JSON ответ
|
||
var jsonData any
|
||
if err := json.Unmarshal(out, &jsonData); err == nil {
|
||
result[fieldName] = jsonData
|
||
}
|
||
}
|
||
|
||
// Получаем информацию о нодах через /nodes
|
||
nodesInfo, err := getNodesInfoFromPvesh(ctx)
|
||
if err == nil {
|
||
result["nodes_info"] = nodesInfo
|
||
}
|
||
|
||
return result, nil
|
||
}
|
||
|
||
// getNodesInfoFromPvesh collects node information via "pvesh get /nodes"
// plus several per-node endpoints (status, resources, config, network,
// storage). Per-node queries are best effort: failures are ignored.
func getNodesInfoFromPvesh(ctx context.Context) (map[string]any, error) {
	result := map[string]any{}

	// query runs "pvesh get <path>" and returns the raw JSON body.
	query := func(path string) ([]byte, error) {
		return exec.CommandContext(ctx, "pvesh", "get", path, "--output-format", "json").Output()
	}

	out, err := query("/nodes")
	if err != nil {
		return result, fmt.Errorf("failed to get nodes list: %w", err)
	}

	var nodesData []map[string]any
	if err := json.Unmarshal(out, &nodesData); err != nil {
		return result, fmt.Errorf("failed to parse nodes JSON: %w", err)
	}

	var nodesInfo []map[string]any
	for _, node := range nodesData {
		nodeName, _ := node["node"].(string)

		// Copy the summary fields reported by /nodes.
		nodeInfo := map[string]any{
			"node":    node["node"],
			"status":  node["status"],
			"cpu":     node["cpu"],
			"level":   node["level"],
			"id":      node["id"],
			"type":    node["type"],
			"maxcpu":  node["maxcpu"],
			"maxmem":  node["maxmem"],
			"mem":     node["mem"],
			"disk":    node["disk"],
			"maxdisk": node["maxdisk"],
			"uptime":  node["uptime"],
		}

		// Enrich with per-node endpoints when the node name is known.
		if nodeName != "" {
			// Node status details (object payload).
			if raw, err := query(fmt.Sprintf("/nodes/%s/status", nodeName)); err == nil {
				var v map[string]any
				if json.Unmarshal(raw, &v) == nil {
					nodeInfo["status_details"] = v
				}
			}
			// Node resources (array payload).
			if raw, err := query(fmt.Sprintf("/nodes/%s/resources", nodeName)); err == nil {
				var v []map[string]any
				if json.Unmarshal(raw, &v) == nil {
					nodeInfo["resources"] = v
				}
			}
			// Node configuration (object payload).
			if raw, err := query(fmt.Sprintf("/nodes/%s/config", nodeName)); err == nil {
				var v map[string]any
				if json.Unmarshal(raw, &v) == nil {
					nodeInfo["config"] = v
				}
			}
			// Network interfaces (array payload).
			if raw, err := query(fmt.Sprintf("/nodes/%s/network", nodeName)); err == nil {
				var v []map[string]any
				if json.Unmarshal(raw, &v) == nil {
					nodeInfo["network"] = v
				}
			}
			// Node storages (array payload).
			if raw, err := query(fmt.Sprintf("/nodes/%s/storage", nodeName)); err == nil {
				var v []map[string]any
				if json.Unmarshal(raw, &v) == nil {
					nodeInfo["storage"] = v
				}
			}
		}

		nodesInfo = append(nodesInfo, nodeInfo)
	}

	result["nodes"] = nodesInfo
	return result, nil
}
|
||
|
||
// collectQuorumInfo returns detailed cluster quorum information.
// When corosync-quorumtool is unavailable or fails, the zero-valued
// defaults are returned with a nil error (best effort).
func collectQuorumInfo(ctx context.Context) (map[string]any, error) {
	result := map[string]any{
		"quorate":        false,
		"members":        0,
		"total_votes":    0,
		"expected_votes": 0,
	}

	if _, err := exec.LookPath("corosync-quorumtool"); err == nil {
		out, err := exec.CommandContext(ctx, "corosync-quorumtool", "-s").Output()
		// BUG FIX: the original returned early on success (err == nil) and
		// attempted to parse the output only when the command had FAILED,
		// so real quorum data was never collected. Parse only on success.
		if err == nil {
			parseQuorumToolOutput(string(out), result)
		}
	}

	return result, nil
}

// parseQuorumToolOutput parses "corosync-quorumtool -s" output and fills
// quorate/members/total_votes/expected_votes in result.
func parseQuorumToolOutput(out string, result map[string]any) {
	// intValue extracts the integer after the first ":" in a line.
	intValue := func(line string) (int, bool) {
		_, v, ok := strings.Cut(line, ":")
		if !ok {
			return 0, false
		}
		n, err := strconv.Atoi(strings.TrimSpace(v))
		return n, err == nil
	}

	for _, line := range strings.Split(out, "\n") {
		line = strings.TrimSpace(line)
		switch {
		case strings.Contains(line, "Quorate:"):
			result["quorate"] = strings.Contains(line, "Yes")
		case strings.HasPrefix(line, "Nodes:"):
			if n, ok := intValue(line); ok {
				result["members"] = n
			}
		case strings.HasPrefix(line, "Total votes:"):
			if n, ok := intValue(line); ok {
				result["total_votes"] = n
			}
		case strings.HasPrefix(line, "Expected votes:"):
			if n, ok := intValue(line); ok {
				result["expected_votes"] = n
			}
		}
	}
}
|
||
|
||
|
||
|
||
|
||
|
||
// collectStoragesInfo collects information about cluster storages by
// parsing "pvesm status" output. Returns an empty slice (nil error) when
// pvesm is not installed; returns the command error when pvesm fails.
//
// NOTE(review): the parsing below depends on the exact column order of
// "pvesm status" (Name Type Status Content Total Used Available) — verify
// against the pvesm version in use.
func collectStoragesInfo(ctx context.Context) ([]map[string]any, error) {
	var storages []map[string]any

	// Try "pvesm status" only when the binary is present.
	if _, err := exec.LookPath("pvesm"); err == nil {
		cmd := exec.CommandContext(ctx, "pvesm", "status")
		out, err := cmd.Output()
		if err != nil {
			return storages, err
		}

		lines := strings.Split(string(out), "\n")
		for i, line := range lines {
			if i == 0 { // skip the header row
				continue
			}
			line = strings.TrimSpace(line)
			if line == "" {
				continue
			}

			fields := strings.Fields(line)
			if len(fields) >= 4 {
				// Base record: id, type, content list; shared defaults to false.
				storage := map[string]any{
					"storage_id": fields[0],
					"type":       fields[1],
					"content":    strings.Split(fields[3], ","),
					"shared":     false,
				}

				// Size columns (total/used/avail) are present only in the
				// long form of the output; "-" marks an unknown value.
				if len(fields) >= 7 {
					// Sizes come in a "500.00G"-style format.
					if sizeStr := fields[4]; sizeStr != "-" {
						if size, err := parseSizeToGB(sizeStr); err == nil {
							storage["total_gb"] = size
						}
					}
					if usedStr := fields[5]; usedStr != "-" {
						if used, err := parseSizeToGB(usedStr); err == nil {
							storage["used_gb"] = used
						}
					}
					if availStr := fields[6]; availStr != "-" {
						if avail, err := parseSizeToGB(availStr); err == nil {
							storage["avail_gb"] = avail
						}
					}
				}

				// Mark network storage types as shared.
				sharedTypes := []string{"nfs", "cifs", "glusterfs", "cephfs"}
				for _, st := range sharedTypes {
					if fields[1] == st {
						storage["shared"] = true
						break
					}
				}

				storages = append(storages, storage)
			}
		}
	}

	return storages, nil
}
|
||
|
||
// parseSizeToGB converts a human-readable size such as "500.00G" into
// gigabytes. Supported suffixes: P, T, G, M, K (binary multiples); a bare
// number is treated as gigabytes, and "" or "-" yield 0 with a nil error.
// Generalized from the original, which only handled T/G/M.
func parseSizeToGB(sizeStr string) (float64, error) {
	sizeStr = strings.TrimSpace(sizeStr)
	if sizeStr == "" || sizeStr == "-" {
		return 0, nil
	}

	// Strip the unit suffix and remember the GB multiplier.
	multiplier := 1.0
	switch {
	case strings.HasSuffix(sizeStr, "P"):
		multiplier = 1024 * 1024
		sizeStr = strings.TrimSuffix(sizeStr, "P")
	case strings.HasSuffix(sizeStr, "T"):
		multiplier = 1024
		sizeStr = strings.TrimSuffix(sizeStr, "T")
	case strings.HasSuffix(sizeStr, "G"):
		multiplier = 1
		sizeStr = strings.TrimSuffix(sizeStr, "G")
	case strings.HasSuffix(sizeStr, "M"):
		multiplier = 1.0 / 1024
		sizeStr = strings.TrimSuffix(sizeStr, "M")
	case strings.HasSuffix(sizeStr, "K"):
		multiplier = 1.0 / (1024 * 1024)
		sizeStr = strings.TrimSuffix(sizeStr, "K")
	}

	value, err := strconv.ParseFloat(sizeStr, 64)
	if err != nil {
		return 0, err
	}

	return value * multiplier, nil
}
|
||
|
||
// collectDetailedNodesInfo собирает подробную информацию о нодах кластера
|
||
func collectDetailedNodesInfo(ctx context.Context, clusterName, clusterUUID string) ([]map[string]any, error) {
|
||
var nodes []map[string]any
|
||
|
||
// Получаем данные из pvecm nodes (имена нод)
|
||
nodesData := parsePvecmNodes(ctx)
|
||
|
||
// Получаем данные из pvecm status (IP адреса)
|
||
statusData := parsePvecmStatus(ctx)
|
||
|
||
// Объединяем данные
|
||
combinedNodes := combineNodeInfo(nodesData, statusData)
|
||
|
||
// Обрабатываем объединенные данные
|
||
for _, nodeInfo := range combinedNodes {
|
||
nodeID := fmt.Sprintf("%08x", nodeInfo.NodeID)
|
||
nodeName := nodeInfo.Name
|
||
nodeIP := nodeInfo.IP
|
||
|
||
// Определяем, является ли нода локальной
|
||
isLocal := strings.Contains(nodeIP, "10.14.88.12") // IP локальной ноды
|
||
if isLocal {
|
||
// Для локальной ноды получаем имя хоста
|
||
if hostname, err := os.Hostname(); err == nil {
|
||
nodeName = hostname
|
||
}
|
||
}
|
||
|
||
// Проверяем доступность ноды через ping
|
||
isOnline := checkNodeOnline(ctx, nodeIP)
|
||
|
||
// Создаем структуру ноды с правильным порядком полей
|
||
node := map[string]any{
|
||
// Общая информация о ноде (выводится первой)
|
||
"node_id": nodeInfo.NodeID, // Используем обычный формат вместо hex
|
||
"name": nodeName,
|
||
"online": isOnline,
|
||
"cluster_id": generateClusterID(clusterName, clusterUUID),
|
||
"cluster_uuid": clusterUUID,
|
||
"node_uid": generateNodeUID(clusterUUID, nodeID),
|
||
}
|
||
|
||
// Если нода онлайн, собираем дополнительную информацию
|
||
if isOnline {
|
||
// Corosync IP адрес ноды
|
||
node["corosync_ip"] = nodeIP
|
||
|
||
// Информация о машине
|
||
if machineInfo, err := getNodeMachineInfo(ctx); err == nil {
|
||
for k, v := range machineInfo {
|
||
node[k] = v
|
||
}
|
||
}
|
||
|
||
// Информация об ОС
|
||
if osInfo, err := getNodeOSInfo(ctx); err == nil {
|
||
node["os"] = osInfo
|
||
}
|
||
|
||
// Ресурсы (использование CPU, памяти, load average)
|
||
if resInfo, err := getNodeResources(ctx); err == nil {
|
||
node["resources"] = resInfo
|
||
}
|
||
|
||
// Real IPs (реальные IP адреса ноды)
|
||
if realIPs, err := getNodeRealIPs(ctx, nodeIP); err == nil {
|
||
node["real_ips"] = realIPs
|
||
}
|
||
|
||
// Детальная информация о железе (выводится после общей информации)
|
||
if hwInfo, err := getNodeHardwareInfo(ctx); err == nil {
|
||
node["hardware"] = hwInfo
|
||
}
|
||
|
||
// Информация о виртуальных машинах на ноде
|
||
if vmInfo, err := getNodeVMInfo(ctx, nodeName); err == nil {
|
||
node["vm_summary"] = vmInfo
|
||
}
|
||
} else {
|
||
// Для офлайн нод заполняем пустыми значениями в правильном порядке
|
||
node["corosync_ip"] = ""
|
||
node["machine_id"] = ""
|
||
node["product_uuid"] = ""
|
||
node["os"] = map[string]any{
|
||
"kernel": "",
|
||
"pve_version": "",
|
||
"uptime_sec": 0,
|
||
}
|
||
node["resources"] = map[string]any{
|
||
"cpu_usage_percent": 0,
|
||
"memory_used_mb": 0,
|
||
"swap_used_mb": 0,
|
||
"loadavg": []float64{0, 0, 0},
|
||
}
|
||
node["real_ips"] = []string{}
|
||
node["hardware"] = map[string]any{
|
||
"cpu_model": "",
|
||
"cpu_cores": 0,
|
||
"sockets": 0,
|
||
"threads": 0,
|
||
"memory_total_mb": 0,
|
||
}
|
||
node["vm_summary"] = map[string]any{
|
||
"total_vms": 0,
|
||
"running_vms": 0,
|
||
"stopped_vms": 0,
|
||
"total_containers": 0,
|
||
"running_containers": 0,
|
||
"stopped_containers": 0,
|
||
"total_cpu_cores": 0,
|
||
"total_memory_mb": 0,
|
||
"used_cpu_cores": 0,
|
||
"used_memory_mb": 0,
|
||
}
|
||
}
|
||
|
||
nodes = append(nodes, node)
|
||
}
|
||
|
||
return nodes, nil
|
||
}
|
||
|
||
// NodeInfo holds per-node data merged from "pvecm nodes" (ID, votes,
// name) and "pvecm status" (ID, votes, corosync IP).
type NodeInfo struct {
	NodeID int    // corosync node ID (decimal)
	Votes  int    // quorum votes contributed by the node
	Name   string // node name from "pvecm nodes"; empty when unknown
	IP     string // corosync ring address from "pvecm status"; empty when unknown
}
|
||
|
||
// parsePvecmNodes парсит вывод pvecm nodes для получения имен нод
|
||
func parsePvecmNodes(ctx context.Context) []NodeInfo {
|
||
var nodes []NodeInfo
|
||
|
||
if _, err := exec.LookPath("pvecm"); err != nil {
|
||
return nodes
|
||
}
|
||
|
||
cmd := exec.CommandContext(ctx, "pvecm", "nodes")
|
||
out, err := cmd.Output()
|
||
if err != nil {
|
||
return nodes
|
||
}
|
||
|
||
lines := strings.Split(string(out), "\n")
|
||
inDataSection := false
|
||
|
||
for _, line := range lines {
|
||
line = strings.TrimSpace(line)
|
||
|
||
// Пропускаем заголовок
|
||
if strings.Contains(line, "Nodeid") && strings.Contains(line, "Votes") && strings.Contains(line, "Name") {
|
||
inDataSection = true
|
||
continue
|
||
}
|
||
|
||
if inDataSection {
|
||
if line == "" {
|
||
continue
|
||
}
|
||
|
||
// Парсим строки с данными нод: "1 1 pnode12"
|
||
fields := strings.Fields(line)
|
||
if len(fields) >= 3 {
|
||
if nodeID, err := strconv.Atoi(fields[0]); err == nil {
|
||
if votes, err := strconv.Atoi(fields[1]); err == nil {
|
||
name := fields[2]
|
||
|
||
nodes = append(nodes, NodeInfo{
|
||
NodeID: nodeID,
|
||
Votes: votes,
|
||
Name: name,
|
||
})
|
||
}
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
return nodes
|
||
}
|
||
|
||
// parsePvecmStatus парсит вывод pvecm status для получения IP адресов
|
||
func parsePvecmStatus(ctx context.Context) []NodeInfo {
|
||
var status []NodeInfo
|
||
|
||
if _, err := exec.LookPath("pvecm"); err != nil {
|
||
return status
|
||
}
|
||
|
||
cmd := exec.CommandContext(ctx, "pvecm", "status")
|
||
out, err := cmd.Output()
|
||
if err != nil {
|
||
return status
|
||
}
|
||
|
||
lines := strings.Split(string(out), "\n")
|
||
inMembershipSection := false
|
||
|
||
for _, line := range lines {
|
||
line = strings.TrimSpace(line)
|
||
|
||
// Находим секцию Membership information
|
||
if strings.Contains(line, "Membership information") {
|
||
inMembershipSection = true
|
||
continue
|
||
}
|
||
|
||
if inMembershipSection {
|
||
// Пропускаем заголовки и разделители
|
||
if strings.Contains(line, "Nodeid") || strings.Contains(line, "----") || line == "" {
|
||
continue
|
||
}
|
||
|
||
// Парсим строки с данными нод: "0x00000001 1 10.14.88.22"
|
||
fields := strings.Fields(line)
|
||
if len(fields) >= 3 {
|
||
// Конвертируем hex в decimal
|
||
nodeIDHex := strings.TrimPrefix(fields[0], "0x")
|
||
if nodeID, err := strconv.ParseInt(nodeIDHex, 16, 32); err == nil {
|
||
if votes, err := strconv.Atoi(fields[1]); err == nil {
|
||
ip := fields[2]
|
||
|
||
status = append(status, NodeInfo{
|
||
NodeID: int(nodeID),
|
||
Votes: votes,
|
||
IP: ip,
|
||
})
|
||
}
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
return status
|
||
}
|
||
|
||
// combineNodeInfo объединяет данные из pvecm nodes и pvecm status
|
||
func combineNodeInfo(nodes, status []NodeInfo) []NodeInfo {
|
||
var combined []NodeInfo
|
||
|
||
// Создаем мапы для быстрого поиска
|
||
nodesMap := make(map[int]NodeInfo)
|
||
statusMap := make(map[int]NodeInfo)
|
||
|
||
for _, node := range nodes {
|
||
nodesMap[node.NodeID] = node
|
||
}
|
||
|
||
for _, stat := range status {
|
||
statusMap[stat.NodeID] = stat
|
||
}
|
||
|
||
// Объединяем данные
|
||
for i := 1; i <= 32; i++ {
|
||
if node, ok := nodesMap[i]; ok {
|
||
if stat, ok := statusMap[i]; ok {
|
||
combined = append(combined, NodeInfo{
|
||
NodeID: i,
|
||
Votes: node.Votes,
|
||
Name: node.Name,
|
||
IP: stat.IP,
|
||
})
|
||
}
|
||
}
|
||
}
|
||
|
||
return combined
|
||
}
|
||
|
||
// checkNodeOnline reports whether the node answers a single ICMP ping.
func checkNodeOnline(ctx context.Context, nodeIP string) bool {
	// Bound the ping with a 3-second timeout on top of the parent context.
	pingCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
	defer cancel()

	// One packet, one-second per-packet wait.
	return exec.CommandContext(pingCtx, "ping", "-c", "1", "-W", "1", nodeIP).Run() == nil
}
|
||
|
||
// generateNodeUID derives a stable 16-character node identifier from
// cluster_uuid + node_id using SHA-256.
func generateNodeUID(clusterUUID, nodeID string) string {
	sum := sha256.Sum256([]byte(clusterUUID + ":" + nodeID))
	return hex.EncodeToString(sum[:])[:16]
}
|
||
|
||
// collectCorosyncInfo collects corosync configuration details
// (bindnetaddr, mcastport, ttl) from corosync.conf plus the quorum
// provider from "corosync-quorumtool -s". All steps are best effort;
// the function always returns a nil error.
func collectCorosyncInfo(ctx context.Context) (map[string]any, error) {
	result := map[string]any{}

	// Locate corosync.conf; fall back to the alternative paths used by
	// Proxmox installations.
	corosyncPath := "/etc/corosync/corosync.conf"
	if _, err := os.Stat(corosyncPath); os.IsNotExist(err) {
		altPaths := []string{
			"/etc/pve/corosync.conf",
			"/var/lib/pve-cluster/corosync.conf",
		}
		for _, path := range altPaths {
			if _, err := os.Stat(path); err == nil {
				corosyncPath = path
				break
			}
		}
	}

	if data, err := os.ReadFile(corosyncPath); err == nil {
		// Parse the main corosync parameters line by line.
		lines := strings.Split(string(data), "\n")
		for _, line := range lines {
			line = strings.TrimSpace(line)
			// Skip comments and blank lines.
			if strings.HasPrefix(line, "#") || line == "" {
				continue
			}

			// Bind network address (string value).
			if strings.HasPrefix(line, "bindnetaddr:") {
				parts := strings.SplitN(line, ":", 2)
				if len(parts) == 2 {
					result["bindnetaddr"] = strings.TrimSpace(parts[1])
				}
			}
			// Multicast port (integer value).
			if strings.HasPrefix(line, "mcastport:") {
				parts := strings.SplitN(line, ":", 2)
				if len(parts) == 2 {
					if port, err := strconv.Atoi(strings.TrimSpace(parts[1])); err == nil {
						result["mcastport"] = port
					}
				}
			}
			// Multicast TTL (integer value).
			if strings.HasPrefix(line, "ttl:") {
				parts := strings.SplitN(line, ":", 2)
				if len(parts) == 2 {
					if ttl, err := strconv.Atoi(strings.TrimSpace(parts[1])); err == nil {
						result["ttl"] = ttl
					}
				}
			}
		}
	}

	// Runtime corosync status: extract the quorum provider.
	if _, err := exec.LookPath("corosync-quorumtool"); err == nil {
		cmd := exec.CommandContext(ctx, "corosync-quorumtool", "-s")
		if out, err := cmd.Output(); err == nil {
			lines := strings.Split(string(out), "\n")
			for _, line := range lines {
				line = strings.TrimSpace(line)
				if strings.HasPrefix(line, "Quorum provider:") {
					parts := strings.SplitN(line, ":", 2)
					if len(parts) == 2 {
						result["quorum_provider"] = strings.TrimSpace(parts[1])
					}
				}
			}
		}
	}

	return result, nil
}
|
||
|
||
// getNodeIP resolves a node name to its first address via "getent hosts".
func getNodeIP(ctx context.Context, nodeName string) (string, error) {
	out, err := exec.CommandContext(ctx, "getent", "hosts", nodeName).Output()
	if err == nil {
		// getent prints "<address> <names...>"; the address is field one.
		if fields := strings.Fields(string(out)); len(fields) > 0 {
			return fields[0], nil
		}
	}
	return "", errors.New("node IP not found")
}
|
||
|
||
// getNodeMachineInfo returns this host's machine identifiers
// (machine_id, product_uuid). Missing files are silently skipped; the
// context parameter is unused but kept for interface symmetry.
func getNodeMachineInfo(ctx context.Context) (map[string]any, error) {
	result := map[string]any{}

	// Systemd machine ID.
	if data, err := os.ReadFile("/etc/machine-id"); err == nil {
		result["machine_id"] = strings.TrimSpace(string(data))
	}

	// DMI product UUID (may require elevated privileges on some systems).
	if data, err := os.ReadFile("/sys/class/dmi/id/product_uuid"); err == nil {
		result["product_uuid"] = strings.TrimSpace(string(data))
	}

	return result, nil
}
|
||
|
||
// getNodeOSInfo collects kernel version, PVE version and uptime for this
// host. Each source is best effort; the error is always nil.
func getNodeOSInfo(ctx context.Context) (map[string]any, error) {
	result := map[string]any{}

	// Kernel: first three fields of /proc/version ("Linux version X...").
	if data, err := os.ReadFile("/proc/version"); err == nil {
		fields := strings.Fields(strings.TrimSpace(string(data)))
		if len(fields) >= 3 {
			result["kernel"] = strings.Join(fields[0:3], " ")
		}
	}

	// PVE version from the pve-manager line of "pveversion -v".
	if _, err := exec.LookPath("pveversion"); err == nil {
		if out, err := exec.CommandContext(ctx, "pveversion", "-v").Output(); err == nil {
			for _, line := range strings.Split(string(out), "\n") {
				if !strings.Contains(line, "pve-manager") {
					continue
				}
				if parts := strings.Fields(line); len(parts) >= 2 {
					result["pve_version"] = parts[1]
				}
			}
		}
	}

	// Uptime in whole seconds from /proc/uptime.
	if data, err := os.ReadFile("/proc/uptime"); err == nil {
		if fields := strings.Fields(string(data)); len(fields) > 0 {
			if uptime, err := strconv.ParseFloat(fields[0], 64); err == nil {
				result["uptime_sec"] = int64(uptime)
			}
		}
	}

	return result, nil
}
|
||
|
||
// getNodeHardwareInfo collects CPU and memory details for this host from
// /proc/cpuinfo and /proc/meminfo. The context parameter is unused; the
// error is always nil.
func getNodeHardwareInfo(ctx context.Context) (map[string]any, error) {
	result := map[string]any{}

	// CPU: model name, logical processor count and physical socket count.
	if data, err := os.ReadFile("/proc/cpuinfo"); err == nil {
		var (
			cpuModel       string
			cores, sockets int
		)
		seenModels := map[string]bool{}
		seenSockets := map[string]bool{}

		for _, raw := range strings.Split(string(data), "\n") {
			line := strings.TrimSpace(raw)
			switch {
			case strings.HasPrefix(line, "model name"):
				if _, v, ok := strings.Cut(line, ":"); ok {
					model := strings.TrimSpace(v)
					if !seenModels[model] {
						cpuModel = model
						seenModels[model] = true
					}
				}
			case strings.HasPrefix(line, "processor"):
				cores++
			case strings.HasPrefix(line, "physical id"):
				if _, v, ok := strings.Cut(line, ":"); ok {
					socket := strings.TrimSpace(v)
					if !seenSockets[socket] {
						sockets++
						seenSockets[socket] = true
					}
				}
			}
		}

		result["cpu_model"] = cpuModel
		result["cpu_cores"] = cores
		result["sockets"] = sockets
		// Simplification: threads reported as the logical core count.
		result["threads"] = cores
	}

	// Total memory in MB from the MemTotal line (reported in kB).
	if data, err := os.ReadFile("/proc/meminfo"); err == nil {
		for _, line := range strings.Split(string(data), "\n") {
			if !strings.HasPrefix(line, "MemTotal:") {
				continue
			}
			if fields := strings.Fields(line); len(fields) >= 2 {
				if kb, err := strconv.ParseUint(fields[1], 10, 64); err == nil {
					result["memory_total_mb"] = int(kb / 1024)
				}
			}
		}
	}

	return result, nil
}
|
||
|
||
// getNodeResources reports current host utilisation: memory used and load
// average from /proc, plus placeholder CPU/swap figures. The error is
// always nil; the context parameter is unused.
func getNodeResources(ctx context.Context) (map[string]any, error) {
	result := map[string]any{}

	// Simplified: CPU usage sampling is not implemented.
	result["cpu_usage_percent"] = 0.0

	// Memory used = MemTotal - MemFree - Buffers - Cached (values in kB).
	if data, err := os.ReadFile("/proc/meminfo"); err == nil {
		var total, free, buffers, cached uint64
		for _, line := range strings.Split(string(data), "\n") {
			fields := strings.Fields(line)
			if len(fields) < 2 {
				continue
			}
			val, err := strconv.ParseUint(fields[1], 10, 64)
			if err != nil {
				continue
			}
			switch fields[0] {
			case "MemTotal:":
				total = val
			case "MemFree:":
				free = val
			case "Buffers:":
				buffers = val
			case "Cached:":
				cached = val
			}
		}
		result["memory_used_mb"] = int((total - free - buffers - cached) / 1024)
	}

	// Simplified: swap usage is not implemented.
	result["swap_used_mb"] = 0

	// 1/5/15-minute load averages from /proc/loadavg.
	if data, err := os.ReadFile("/proc/loadavg"); err == nil {
		fields := strings.Fields(string(data))
		if len(fields) >= 3 {
			var loadavg []float64
			for _, f := range fields[:3] {
				if v, err := strconv.ParseFloat(f, 64); err == nil {
					loadavg = append(loadavg, v)
				}
			}
			result["loadavg"] = loadavg
		}
	}

	return result, nil
}
|
||
|
||
// getNodeNetworkInfo reports basic details for a fixed set of common
// network interfaces when present under /sys/class/net. The context
// parameter is unused; the error is always nil.
func getNodeNetworkInfo(ctx context.Context) ([]map[string]any, error) {
	var networks []map[string]any

	// Simplified: only well-known interface names are probed.
	for _, iface := range []string{"eth0", "ens33", "enp0s3", "vmbr0"} {
		sysPath := "/sys/class/net/" + iface
		if _, err := os.Stat(sysPath); err != nil {
			continue
		}

		network := map[string]any{"iface": iface}

		// MAC address from sysfs.
		if data, err := os.ReadFile(sysPath + "/address"); err == nil {
			network["mac"] = strings.TrimSpace(string(data))
		}

		// Placeholders: address and traffic counters are not collected here.
		network["ip"] = ""
		network["rx_bytes"] = 0
		network["tx_bytes"] = 0
		network["errors"] = 0

		// Proxmox bridges are named vmbrN.
		if strings.HasPrefix(iface, "vmbr") {
			network["type"] = "bridge"
		}

		networks = append(networks, network)
	}

	return networks, nil
}
|
||
|
||
// getNodeDiskInfo reports basic details for a fixed set of common block
// devices when present. The context parameter is unused; the error is
// always nil.
func getNodeDiskInfo(ctx context.Context) ([]map[string]any, error) {
	var disks []map[string]any

	// Simplified: only well-known device paths are probed.
	for _, dev := range []string{"/dev/sda", "/dev/nvme0n1", "/dev/vda"} {
		if _, err := os.Stat(dev); err != nil {
			continue
		}

		info := map[string]any{
			"device":  dev,
			"model":   "",
			"size_gb": 0,
			"used_gb": 0,
			"health":  "UNKNOWN",
		}

		// Size: /sys/block/<dev>/size reports 512-byte sectors.
		sizePath := "/sys/block/" + strings.TrimPrefix(dev, "/dev/") + "/size"
		if data, err := os.ReadFile(sizePath); err == nil {
			if sectors, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64); err == nil {
				info["size_gb"] = int(sectors * 512 / 1024 / 1024 / 1024)
			}
		}

		disks = append(disks, info)
	}

	return disks, nil
}
|
||
|
||
func getNodeServices(ctx context.Context) ([]map[string]any, error) {
|
||
var services []map[string]any
|
||
|
||
serviceNames := []string{"pve-cluster", "pvedaemon", "pveproxy", "corosync"}
|
||
for _, svc := range serviceNames {
|
||
service := map[string]any{
|
||
"name": svc,
|
||
"active": isServiceRunning(ctx, svc),
|
||
}
|
||
services = append(services, service)
|
||
}
|
||
|
||
return services, nil
|
||
}
|
||
|
||
func getNodeLogs(ctx context.Context) ([]map[string]any, error) {
|
||
// Упрощенная версия - возвращаем пустой массив
|
||
// В реальной реализации можно читать логи из /var/log/pve/
|
||
return []map[string]any{}, nil
|
||
}
|
||
|
||
// getNodeGPUInfo collects information about the GPUs installed on the node.
//
// NVIDIA cards are queried via nvidia-smi (index, model, memory in MB,
// utilization percent and temperature, all parsed from CSV output with
// "nounits"). If no NVIDIA GPU is found, lspci output is scanned for
// VGA/3D/Display controllers; those fallback entries carry the raw lspci
// line as the model and zeroed metrics.
func getNodeGPUInfo(ctx context.Context) ([]map[string]any, error) {
	var gpus []map[string]any

	// Try nvidia-smi for NVIDIA GPUs.
	if _, err := exec.LookPath("nvidia-smi"); err == nil {
		cmd := exec.CommandContext(ctx, "nvidia-smi", "--query-gpu=index,name,memory.total,memory.used,utilization.gpu,temperature.gpu", "--format=csv,noheader,nounits")
		out, err := cmd.Output()
		if err == nil {
			for _, line := range strings.Split(string(out), "\n") {
				line = strings.TrimSpace(line)
				if line == "" {
					continue
				}

				fields := strings.Split(line, ", ")
				if len(fields) < 6 {
					continue
				}

				gpu := map[string]any{
					"index":               0,
					"model":               strings.TrimSpace(fields[1]),
					"memory_total_mb":     0,
					"memory_used_mb":      0,
					"utilization_percent": 0.0,
					"temperature_c":       0.0,
				}

				if idx, err := strconv.Atoi(fields[0]); err == nil {
					gpu["index"] = idx
				}
				// Memory values are already in MB thanks to "nounits".
				if total, err := strconv.Atoi(fields[2]); err == nil {
					gpu["memory_total_mb"] = total
				}
				if used, err := strconv.Atoi(fields[3]); err == nil {
					gpu["memory_used_mb"] = used
				}
				if util, err := strconv.ParseFloat(fields[4], 64); err == nil {
					gpu["utilization_percent"] = util
				}
				if temp, err := strconv.ParseFloat(fields[5], 64); err == nil {
					gpu["temperature_c"] = temp
				}

				gpus = append(gpus, gpu)
			}
		}
	}

	// Fallback: scan lspci for non-NVIDIA GPUs (AMD, Intel, ...).
	if len(gpus) == 0 {
		cmd := exec.CommandContext(ctx, "lspci", "-nn")
		out, err := cmd.Output()
		if err == nil {
			for _, line := range strings.Split(string(out), "\n") {
				line = strings.TrimSpace(line)
				if !strings.Contains(line, "VGA") && !strings.Contains(line, "3D") && !strings.Contains(line, "Display") {
					continue
				}
				// BUGFIX: assign a sequential index instead of 0 for every
				// device, so multiple GPUs remain distinguishable.
				gpus = append(gpus, map[string]any{
					"index":               len(gpus),
					"model":               line,
					"memory_total_mb":     0,
					"memory_used_mb":      0,
					"utilization_percent": 0.0,
					"temperature_c":       0.0,
				})
			}
		}
	}

	return gpus, nil
}
|
||
|
||
// getNodeRealIPs returns the node's IPv4 addresses, excluding the loopback
// address and the corosync ring address passed in corosyncIP.
//
// Addresses are taken from `ip addr show`; if that yields at most one
// address, `hostname -I` is consulted as a fallback. Duplicates are removed
// while preserving discovery order.
func getNodeRealIPs(ctx context.Context, corosyncIP string) ([]string, error) {
	var realIPs []string
	seen := map[string]bool{}

	// addIP appends ip unless it is empty, loopback, the corosync address,
	// or already collected. Note: corosync IP is intentionally excluded.
	addIP := func(ip string) {
		if ip == "" || ip == "127.0.0.1" {
			return
		}
		if corosyncIP != "" && ip == corosyncIP {
			return
		}
		if seen[ip] {
			return
		}
		seen[ip] = true
		realIPs = append(realIPs, ip)
	}

	// Primary source: `ip addr show`.
	if _, err := exec.LookPath("ip"); err == nil {
		if out, err := exec.CommandContext(ctx, "ip", "addr", "show").Output(); err == nil {
			for _, line := range strings.Split(string(out), "\n") {
				line = strings.TrimSpace(line)
				// IPv4 entries look like "inet 10.0.0.5/24 ...".
				if !strings.HasPrefix(line, "inet ") || strings.Contains(line, "127.0.0.1") {
					continue
				}
				parts := strings.Fields(line)
				if len(parts) >= 2 {
					addIP(strings.Split(parts[1], "/")[0]) // strip the netmask suffix
				}
			}
		}
	}

	// Fallback: `hostname -I` when the primary source found at most one IP.
	if len(realIPs) <= 1 {
		if out, err := exec.CommandContext(ctx, "hostname", "-I").Output(); err == nil {
			for _, ip := range strings.Fields(string(out)) {
				addIP(strings.TrimSpace(ip))
			}
		}
	}

	return realIPs, nil
}
|
||
|
||
|
||
// calculateClusterResources aggregates per-node CPU, memory and node
// counters into cluster-wide totals.
//
// Each node map may contain:
//   - "online"    bool            — node availability flag;
//   - "hardware"  map[string]any  — "cpu_cores", "sockets", "threads",
//     "memory_total_mb" stored as int;
//   - "resources" map[string]any  — "memory_used_mb" stored as int.
//
// Entries with missing keys or unexpected value types are skipped.
// The storages parameter is currently unused and is kept only for
// interface compatibility with the callers.
func calculateClusterResources(nodes []map[string]any, storages []map[string]any) (map[string]any, error) {
	totalNodes, onlineNodes := 0, 0
	totalCores, onlineCores := 0, 0
	totalSockets, onlineSockets := 0, 0
	totalThreads, onlineThreads := 0, 0
	totalMemory, onlineTotalMemory := 0, 0
	usedMemory, onlineUsedMemory := 0, 0

	for _, node := range nodes {
		totalNodes++

		// Resolve the online flag once per node instead of re-asserting
		// it for every individual metric.
		online, _ := node["online"].(bool)
		if online {
			onlineNodes++
		}

		if hardware, ok := node["hardware"].(map[string]any); ok {
			if cores, ok := hardware["cpu_cores"].(int); ok {
				totalCores += cores
				if online {
					onlineCores += cores
				}
			}
			if sockets, ok := hardware["sockets"].(int); ok {
				totalSockets += sockets
				if online {
					onlineSockets += sockets
				}
			}
			if threads, ok := hardware["threads"].(int); ok {
				totalThreads += threads
				if online {
					onlineThreads += threads
				}
			}
			if memory, ok := hardware["memory_total_mb"].(int); ok {
				totalMemory += memory
				if online {
					onlineTotalMemory += memory
				}
			}
		}

		if resources, ok := node["resources"].(map[string]any); ok {
			if used, ok := resources["memory_used_mb"].(int); ok {
				usedMemory += used
				if online {
					onlineUsedMemory += used
				}
			}
		}
	}

	// Assemble the result in one place instead of mutating nested maps
	// through repeated type assertions.
	result := map[string]any{
		"cpu": map[string]any{
			"total_cores":    totalCores,
			"total_sockets":  totalSockets,
			"total_threads":  totalThreads,
			"online_cores":   onlineCores,
			"online_sockets": onlineSockets,
			"online_threads": onlineThreads,
		},
		"memory": map[string]any{
			"total_mb":     totalMemory,
			"used_mb":      usedMemory,
			"online_total": onlineTotalMemory,
			"online_used":  onlineUsedMemory,
		},
		"nodes": map[string]any{
			"total":  totalNodes,
			"online": onlineNodes,
		},
	}

	return result, nil
}
|
||
|
||
// getNodeVMInfo получает краткую информацию о виртуальных машинах на ноде
|
||
func getNodeVMInfo(ctx context.Context, nodeName string) (map[string]any, error) {
|
||
result := map[string]any{
|
||
"total_vms": 0,
|
||
"running_vms": 0,
|
||
"stopped_vms": 0,
|
||
"total_containers": 0,
|
||
"running_containers": 0,
|
||
"stopped_containers": 0,
|
||
"total_cpu_cores": 0,
|
||
"total_memory_mb": 0,
|
||
"used_cpu_cores": 0,
|
||
"used_memory_mb": 0,
|
||
}
|
||
|
||
// Проверяем наличие pvesh
|
||
if _, err := exec.LookPath("pvesh"); err != nil {
|
||
return result, nil
|
||
}
|
||
|
||
// Получаем информацию о VM (QEMU)
|
||
vmOut, err := exec.CommandContext(ctx, "pvesh", "get", fmt.Sprintf("/nodes/%s/qemu", nodeName), "--output-format", "json").Output()
|
||
if err == nil {
|
||
var vmData []map[string]any
|
||
if err := json.Unmarshal(vmOut, &vmData); err == nil {
|
||
totalVMs := len(vmData)
|
||
runningVMs := 0
|
||
totalCPUCores := 0
|
||
totalMemory := 0
|
||
usedCPUCores := 0
|
||
usedMemory := 0
|
||
|
||
for _, vm := range vmData {
|
||
// Подсчитываем статус
|
||
if status, ok := vm["status"].(string); ok && status == "running" {
|
||
runningVMs++
|
||
}
|
||
|
||
// Подсчитываем ресурсы
|
||
if maxCPU, ok := vm["maxcpu"].(float64); ok {
|
||
totalCPUCores += int(maxCPU)
|
||
}
|
||
if maxMem, ok := vm["maxmem"].(float64); ok {
|
||
totalMemory += int(maxMem)
|
||
}
|
||
if cpu, ok := vm["cpu"].(float64); ok {
|
||
usedCPUCores += int(cpu)
|
||
}
|
||
if mem, ok := vm["mem"].(float64); ok {
|
||
usedMemory += int(mem)
|
||
}
|
||
}
|
||
|
||
result["total_vms"] = totalVMs
|
||
result["running_vms"] = runningVMs
|
||
result["stopped_vms"] = totalVMs - runningVMs
|
||
result["total_cpu_cores"] = totalCPUCores
|
||
result["total_memory_mb"] = totalMemory
|
||
result["used_cpu_cores"] = usedCPUCores
|
||
result["used_memory_mb"] = usedMemory
|
||
}
|
||
}
|
||
|
||
// Получаем информацию о контейнерах (LXC)
|
||
ctOut, err := exec.CommandContext(ctx, "pvesh", "get", fmt.Sprintf("/nodes/%s/lxc", nodeName), "--output-format", "json").Output()
|
||
if err == nil {
|
||
var ctData []map[string]any
|
||
if err := json.Unmarshal(ctOut, &ctData); err == nil {
|
||
totalContainers := len(ctData)
|
||
runningContainers := 0
|
||
|
||
for _, ct := range ctData {
|
||
// Подсчитываем статус
|
||
if status, ok := ct["status"].(string); ok && status == "running" {
|
||
runningContainers++
|
||
}
|
||
|
||
// Добавляем ресурсы контейнеров к общим
|
||
if maxCPU, ok := ct["maxcpu"].(float64); ok {
|
||
result["total_cpu_cores"] = result["total_cpu_cores"].(int) + int(maxCPU)
|
||
}
|
||
if maxMem, ok := ct["maxmem"].(float64); ok {
|
||
result["total_memory_mb"] = result["total_memory_mb"].(int) + int(maxMem)
|
||
}
|
||
if cpu, ok := ct["cpu"].(float64); ok {
|
||
result["used_cpu_cores"] = result["used_cpu_cores"].(int) + int(cpu)
|
||
}
|
||
if mem, ok := ct["mem"].(float64); ok {
|
||
result["used_memory_mb"] = result["used_memory_mb"].(int) + int(mem)
|
||
}
|
||
}
|
||
|
||
result["total_containers"] = totalContainers
|
||
result["running_containers"] = runningContainers
|
||
result["stopped_containers"] = totalContainers - runningContainers
|
||
}
|
||
}
|
||
|
||
return result, nil
|
||
}
|
||
|
||
// isServiceRunning проверяет, запущен ли сервис
|
||
func isServiceRunning(ctx context.Context, serviceName string) bool {
|
||
// Пробуем systemctl
|
||
if _, err := exec.LookPath("systemctl"); err == nil {
|
||
cmd := exec.CommandContext(ctx, "systemctl", "is-active", serviceName)
|
||
err := cmd.Run()
|
||
return err == nil
|
||
}
|
||
|
||
// Fallback: проверяем через ps
|
||
cmd := exec.CommandContext(ctx, "ps", "aux")
|
||
out, err := cmd.Output()
|
||
if err != nil {
|
||
return false
|
||
}
|
||
|
||
return strings.Contains(string(out), serviceName)
|
||
}
|
||
|