init

commit f89d835c47

db/db.sql · 136 lines · Normal file
@@ -0,0 +1,136 @@
DROP TABLE IF EXISTS bem_candidato CASCADE;
DROP TABLE IF EXISTS candidato_mapping CASCADE;
DROP TABLE IF EXISTS rede_social CASCADE;
DROP TABLE IF EXISTS candidato CASCADE;
DROP TABLE IF EXISTS partido CASCADE;
DROP TABLE IF EXISTS despesas_candidato CASCADE;
DROP TABLE IF EXISTS receitas_candidato CASCADE;

---- Core table for candidate identity
CREATE TABLE candidato (
    idcandidato UUID NOT NULL PRIMARY KEY,
    cpf VARCHAR(11),
    nome VARCHAR(255) NOT NULL,
    apelido VARCHAR(255),
    datanascimento TIMESTAMPTZ,
    email TEXT,
    sexo CHAR(15),
    estadocivil VARCHAR(50),
    escolaridade VARCHAR(50),
    ocupacao VARCHAR(150)
);
CREATE INDEX idx_candidato_nome ON candidato (nome);
CREATE INDEX idx_candidato_apelido ON candidato (apelido);

-- Each candidato (idcandidato, cpf, nome) is mapped to a candidacy (sqcandidato, turno, ano, tipoeleicao, siglauf, nomeue, cargo, nrcandidato, resultado)
CREATE TABLE candidato_mapping (
    idcandidato UUID NOT NULL,
    cpf VARCHAR(11),
    nome VARCHAR(255) NOT NULL,
    apelido VARCHAR(255),
    sqcandidato VARCHAR(50) NOT NULL,
    turno VARCHAR(2) NOT NULL,
    ano INT NOT NULL,
    tipoeleicao VARCHAR(50),
    siglauf VARCHAR(2),
    nomeue VARCHAR(100),
    cargo VARCHAR(50),
    sgpartido VARCHAR(50),
    nrcandidato VARCHAR(20),
    resultado VARCHAR(50),
    CONSTRAINT pk_candidato_mapping PRIMARY KEY (idcandidato, ano, siglauf, nomeue, cargo, nrcandidato, resultado),
    CONSTRAINT fk_candidato_mapping_candidato FOREIGN KEY (idcandidato) REFERENCES candidato(idcandidato) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX idx_candidato_mapping_cpf ON candidato_mapping (cpf);
CREATE INDEX idx_candidato_mapping_nome ON candidato_mapping (nome);
CREATE INDEX idx_candidato_mapping_apelido ON candidato_mapping (apelido);
CREATE INDEX idx_candidato_mapping_ano ON candidato_mapping (ano);
CREATE INDEX idx_candidato_mapping_sqcandidato ON candidato_mapping (sqcandidato);

---- Table for storing assets of candidates
CREATE TABLE bem_candidato (
    idcandidato UUID NOT NULL,
    ano INT NOT NULL,
    ordembem INT,
    tipobem VARCHAR(150),
    descricao VARCHAR(500),
    valor NUMERIC(20, 2),
    CONSTRAINT fk_bem_candidato_candidato FOREIGN KEY (idcandidato) REFERENCES candidato(idcandidato) ON DELETE CASCADE ON UPDATE CASCADE
);
ALTER TABLE bem_candidato ADD CONSTRAINT pk_bem_candidato PRIMARY KEY (idcandidato, ano, ordembem);
CREATE INDEX idx_bem_candidato_idcandidato ON bem_candidato (idcandidato);
CREATE INDEX idx_bem_candidato_valor ON bem_candidato (valor);

---- Table for storing social media links of candidates
CREATE TABLE rede_social (
    idcandidato UUID NOT NULL,
    rede VARCHAR(50) NOT NULL,
    siglauf VARCHAR(2),
    ano INT NOT NULL,
    link TEXT NOT NULL,
    CONSTRAINT pk_rede_social PRIMARY KEY (idcandidato, rede, siglauf, ano),
    CONSTRAINT fk_rede_social_candidato FOREIGN KEY (idcandidato) REFERENCES candidato(idcandidato) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX idx_rede_social_idcandidato ON rede_social (idcandidato);

---- Table for storing party information
CREATE TABLE partido (
    sigla VARCHAR(50) NOT NULL PRIMARY KEY,
    nome VARCHAR(255) NOT NULL,
    numero INT NOT NULL
);
CREATE INDEX idx_partido_nome ON partido (nome);
CREATE INDEX idx_partido_numero ON partido (numero);

---- Tables for storing despesas (expenses) and receitas (revenues) of candidacies
CREATE TABLE despesas_candidato (
    idreceita UUID NOT NULL DEFAULT gen_random_uuid(),
    idcandidato UUID NOT NULL,
    ano INT NOT NULL,
    turno VARCHAR(2) NOT NULL,
    sqcandidato VARCHAR(50) NOT NULL,
    sgpartido VARCHAR(50) NOT NULL,
    tipofornecedor VARCHAR(150),
    cnpjfornecedor VARCHAR(14),
    cpffornecedor VARCHAR(11),
    nomefornecedor VARCHAR(255),
    nomefornecedorrfb VARCHAR(255),
    municipiofornecedor VARCHAR(100),
    tipodocumento VARCHAR(50),
    datadespesa TIMESTAMPTZ,
    descricao TEXT,
    origemdespesa TEXT,
    valor NUMERIC(20, 2),
    CONSTRAINT pk_despesas_candidato PRIMARY KEY (idreceita),
    CONSTRAINT fk_despesas_candidato_candidato FOREIGN KEY (idcandidato) REFERENCES candidato(idcandidato) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX idx_despesas_candidato_idcandidato ON despesas_candidato (idcandidato);
CREATE INDEX idx_despesas_candidato_ano ON despesas_candidato (ano);
CREATE INDEX idx_despesas_candidato_sqcandidato ON despesas_candidato (sqcandidato);
CREATE INDEX idx_despesas_candidato_sgpartido ON despesas_candidato (sgpartido);

CREATE TABLE receitas_candidato (
    idreceita UUID NOT NULL DEFAULT gen_random_uuid(),
    idcandidato UUID NOT NULL,
    ano INT NOT NULL,
    turno VARCHAR(2) NOT NULL,
    sqcandidato VARCHAR(50) NOT NULL,
    sgpartido VARCHAR(50) NOT NULL,
    fontereceita VARCHAR(150),
    origemreceita VARCHAR(250),
    naturezareceita VARCHAR(250),
    especiereceita VARCHAR(250),
    cnpjdoador VARCHAR(14),
    cpfdoador VARCHAR(11),
    nomedoador VARCHAR(255),
    nomedoadorrfb VARCHAR(255),
    municipiodoador VARCHAR(100),
    datareceita TIMESTAMPTZ,
    descricao TEXT,
    valor NUMERIC(20, 2),
    CONSTRAINT pk_receitas_candidato PRIMARY KEY (idreceita),
    CONSTRAINT fk_receitas_candidato_candidato FOREIGN KEY (idcandidato) REFERENCES candidato(idcandidato) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX idx_receitas_candidato_idcandidato ON receitas_candidato (idcandidato);
CREATE INDEX idx_receitas_candidato_ano ON receitas_candidato (ano);
CREATE INDEX idx_receitas_candidato_sqcandidato ON receitas_candidato (sqcandidato);
CREATE INDEX idx_receitas_candidato_sgpartido ON receitas_candidato (sgpartido);
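Once the schema has been applied (for example with the deploy scripts below), a quick smoke test can be run against the database container defined in docker-compose.yaml. This is only a sketch: the container name, user, and database come from this repo's compose file, while the election year 2022 and the query itself are purely illustrative.

```bash
# Illustrative check: total declared assets per candidate for one election year.
docker exec -i opencand_db psql -U root -d opencand <<'SQL'
SELECT c.nome, SUM(b.valor) AS total_bens
FROM candidato c
JOIN bem_candidato b ON b.idcandidato = c.idcandidato
WHERE b.ano = 2022            -- placeholder year
GROUP BY c.nome
ORDER BY total_bens DESC
LIMIT 10;
SQL
```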
deploy-db.ps1 · 91 lines · Normal file
@@ -0,0 +1,91 @@
# OpenCand Database Deployment Script (PowerShell)
# This script deploys the database schema changes to the PostgreSQL container

param(
    [string]$ContainerName = "opencand_db",
    [string]$DatabaseName = "opencand",
    [string]$DatabaseUser = "root",
    [string]$SqlFile = ".\db\db.sql"
)

# Configuration
$ErrorActionPreference = "Stop"

Write-Host "🚀 OpenCand Database Deployment Script" -ForegroundColor Yellow
Write-Host "==================================" -ForegroundColor Yellow

try {
    # Check if Docker is running (native commands do not throw, so check the exit code)
    Write-Host "📋 Pre-deployment checks:" -ForegroundColor Yellow
    docker info | Out-Null
    if ($LASTEXITCODE -ne 0) {
        throw "Docker is not running or not accessible"
    }
    Write-Host "✅ Docker is running" -ForegroundColor Green

    # Check if the database container is running
    $runningContainers = docker ps --format "{{.Names}}"
    if ($runningContainers -notcontains $ContainerName) {
        throw "Database container '$ContainerName' is not running. Please start it with: docker-compose up -d"
    }
    Write-Host "✅ Database container is running" -ForegroundColor Green

    # Check if SQL file exists
    if (-not (Test-Path $SqlFile)) {
        throw "SQL file '$SqlFile' not found"
    }
    Write-Host "✅ SQL file exists" -ForegroundColor Green
    Write-Host ""

    # Wait for database to be ready
    Write-Host "⏳ Waiting for database to be ready..." -ForegroundColor Yellow
    $timeout = 30
    $counter = 0
    do {
        $ready = $false
        docker exec $ContainerName pg_isready -U $DatabaseUser -d $DatabaseName | Out-Null
        if ($LASTEXITCODE -eq 0) {
            $ready = $true
        }
        else {
            Start-Sleep -Seconds 1
            $counter++
            if ($counter -ge $timeout) {
                throw "Database failed to become ready within $timeout seconds"
            }
        }
    } while (-not $ready)

    Write-Host "✅ Database is ready" -ForegroundColor Green
    Write-Host ""

    # Create backup before deployment
    Write-Host "💾 Creating database backup..." -ForegroundColor Yellow
    $backupFile = "backup_$(Get-Date -Format 'yyyyMMdd_HHmmss').sql"
    docker exec $ContainerName pg_dump -U $DatabaseUser -d $DatabaseName | Out-File -FilePath $backupFile -Encoding UTF8
    Write-Host "✅ Backup created: $backupFile" -ForegroundColor Green
    Write-Host ""

    # Deploy the SQL file
    Write-Host "🔧 Deploying database changes..." -ForegroundColor Yellow
    Write-Host "Executing: $SqlFile"

    # Execute the SQL file
    Get-Content $SqlFile | docker exec -i $ContainerName psql -U $DatabaseUser -d $DatabaseName

    Write-Host "✅ Database deployment completed successfully!" -ForegroundColor Green
    Write-Host ""

    # Show table information
    Write-Host "📊 Current database tables:" -ForegroundColor Yellow
    docker exec $ContainerName psql -U $DatabaseUser -d $DatabaseName -c "\dt"

    Write-Host ""
    Write-Host "🎉 Deployment completed successfully!" -ForegroundColor Green
    Write-Host "==================================" -ForegroundColor Green

} catch {
    Write-Host "❌ Error: $($_.Exception.Message)" -ForegroundColor Red
    if ($backupFile -and (Test-Path $backupFile)) {
        Write-Host "💡 You can restore from backup using:" -ForegroundColor Yellow
        Write-Host "Get-Content $backupFile | docker exec -i $ContainerName psql -U $DatabaseUser -d $DatabaseName" -ForegroundColor Yellow
    }
    exit 1
}
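Because the script is parameterized, a run with non-default values looks roughly like this. This is only a sketch: it assumes PowerShell 7 (`pwsh`) is on the PATH and that it is invoked from the repository root; the parameter values shown are simply the defaults spelled out.

```bash
# Hypothetical invocation with the parameters written out explicitly.
pwsh -File ./deploy-db.ps1 -ContainerName opencand_db -DatabaseName opencand -DatabaseUser root -SqlFile ./db/db.sql
```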
deploy-db.sh · 91 lines · Normal file
@@ -0,0 +1,91 @@
#!/bin/bash

# OpenCand Database Deployment Script
# This script deploys the database schema changes to the PostgreSQL container

set -e  # Exit on any error

# Configuration
DB_CONTAINER="opencand_db"
DB_NAME="opencand"
DB_USER="root"
SQL_FILE="./db/db.sql"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${YELLOW}🚀 OpenCand Database Deployment Script${NC}"
echo "=================================="

# Check if Docker is running
if ! docker info >/dev/null 2>&1; then
    echo -e "${RED}❌ Error: Docker is not running or not accessible${NC}"
    exit 1
fi

# Check if the database container is running
if ! docker ps --format "table {{.Names}}" | grep -q "^${DB_CONTAINER}$"; then
    echo -e "${RED}❌ Error: Database container '${DB_CONTAINER}' is not running${NC}"
    echo "Please start the services with: docker-compose up -d"
    exit 1
fi

# Check if SQL file exists
if [ ! -f "$SQL_FILE" ]; then
    echo -e "${RED}❌ Error: SQL file '${SQL_FILE}' not found${NC}"
    exit 1
fi

echo -e "${YELLOW}📋 Pre-deployment checks:${NC}"
echo "✅ Docker is running"
echo "✅ Database container is running"
echo "✅ SQL file exists"
echo ""

# Wait for database to be ready
echo -e "${YELLOW}⏳ Waiting for database to be ready...${NC}"
timeout=30
counter=0
while ! docker exec $DB_CONTAINER pg_isready -U $DB_USER -d $DB_NAME >/dev/null 2>&1; do
    sleep 1
    counter=$((counter + 1))
    if [ $counter -ge $timeout ]; then
        echo -e "${RED}❌ Error: Database failed to become ready within ${timeout} seconds${NC}"
        exit 1
    fi
done
echo "✅ Database is ready"
echo ""

# Create backup before deployment
echo -e "${YELLOW}💾 Creating database backup...${NC}"
BACKUP_FILE="backup_$(date +%Y%m%d_%H%M%S).sql"
docker exec $DB_CONTAINER pg_dump -U $DB_USER -d $DB_NAME > $BACKUP_FILE
echo "✅ Backup created: $BACKUP_FILE"
echo ""

# Deploy the SQL file
echo -e "${YELLOW}🔧 Deploying database changes...${NC}"
echo "Executing: $SQL_FILE"

if docker exec -i $DB_CONTAINER psql -U $DB_USER -d $DB_NAME < $SQL_FILE; then
    echo -e "${GREEN}✅ Database deployment completed successfully!${NC}"
    echo ""

    # Show table information
    echo -e "${YELLOW}📊 Current database tables:${NC}"
    docker exec $DB_CONTAINER psql -U $DB_USER -d $DB_NAME -c "\dt"

else
    echo -e "${RED}❌ Error: Database deployment failed${NC}"
    echo -e "${YELLOW}💡 You can restore from backup using:${NC}"
    echo "docker exec -i $DB_CONTAINER psql -U $DB_USER -d $DB_NAME < $BACKUP_FILE"
    exit 1
fi

echo ""
echo -e "${GREEN}🎉 Deployment completed successfully!${NC}"
echo "=================================="
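Typical usage is a plain run from the repository root once the `opencand_db` container is up; the script creates a timestamped backup before applying `db/db.sql` and prints a restore command on failure. A sketch:

```bash
# Run the deployment (assumes docker-compose up -d has already started opencand_db).
chmod +x deploy-db.sh
./deploy-db.sh

# On failure the script prints a restore command using the backup it just wrote, e.g.:
# docker exec -i opencand_db psql -U root -d opencand < backup_20240101_120000.sql
```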
docker-compose.yaml · 191 lines · Normal file
@@ -0,0 +1,191 @@
services:
  # ───────────────────────────────────────────────────────────────────────────
  # 1. PostgreSQL Database
  # ───────────────────────────────────────────────────────────────────────────
  db:
    image: postgres:14-alpine
    container_name: opencand_db
    restart: unless-stopped
    hostname: db
    environment:
      POSTGRES_USER: root
      POSTGRES_PASSWORD: root
      POSTGRES_DB: opencand
    volumes:
      - ./db-data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U root -d opencand"]
      interval: 10s
      timeout: 5s
      retries: 5
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
        labels: "service=database"
    labels:
      - "promtail.enable=true"
      - "promtail.job=opencand-db"

  # ───────────────────────────────────────────────────────────────────────────
  # 2. .NET API
  # ───────────────────────────────────────────────────────────────────────────
  api:
    container_name: opencand_api
    restart: unless-stopped
    image: git.ivanch.me/ivanch/opencand.api:latest
    ports:
      - "5100:8080"
    depends_on:
      db:
        condition: service_healthy
    environment:
      ASPNETCORE_ENVIRONMENT: "Production"
      DatabaseSettings__ConnectionString: "Host=db;Port=5432;Database=opencand;Username=root;Password=root"
    volumes:
      - ./fotos_cand:/app/fotos_cand
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
        labels: "service=api"
    labels:
      - "promtail.enable=true"
      - "promtail.job=opencand-api"

  # ───────────────────────────────────────────────────────────────────────────
  # 3. React + Vite Front-end
  # ───────────────────────────────────────────────────────────────────────────
  frontend:
    container_name: opencand_frontend
    restart: unless-stopped
    image: git.ivanch.me/ivanch/opencand.ui:latest
    ports:
      - "5110:80"
    depends_on:
      - api

  # ───────────────────────────────────────────────────────────────────────────
  # 4. ETL (Optional: runs once at startup)
  #
  # If you want the ETL to run on every compose up, give it restart: "no" or
  # some other policy. It will run, then exit.
  #
  # If you instead prefer to run the ETL manually or via host cron, you can omit
  # this service and just `docker run myorg/etl:latest ...` on demand.
  # ───────────────────────────────────────────────────────────────────────────
  etl:
    image: git.ivanch.me/ivanch/opencand.etl:latest
    container_name: opencand_etl
    restart: "no"
    depends_on:
      db:
        condition: service_healthy
      loki:
        condition: service_started
    environment:
      DatabaseSettings__ConnectionString: "Host=db;Port=5432;Database=opencand;Username=root;Password=root"
      BasePath: "etl-data"
    volumes:
      - ./etl-data:/app/etl-data
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
        labels: "service=etl"
    labels:
      - "promtail.enable=true"
      - "promtail.job=opencand-etl"

  # ───────────────────────────────────────────────────────────────────────────
  # 5. Log Monitoring Stack
  # ───────────────────────────────────────────────────────────────────────────
  # Loki - Log aggregation system
  loki:
    image: grafana/loki:2.9.0
    container_name: opencand_loki
    restart: unless-stopped
    ports:
      - "127.0.0.1:3100:3100"
      - "10.8.0.3:3100:3100"
    command: -config.file=/etc/loki/local-config.yaml
    volumes:
      - ./monitoring/loki-config.yaml:/etc/loki/local-config.yaml
      - loki-data:/loki
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Promtail - Log collection agent
  promtail:
    image: grafana/promtail:2.9.0
    container_name: opencand_promtail
    restart: unless-stopped
    volumes:
      - ./monitoring/promtail-config.yaml:/etc/promtail/config.yml
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    command: -config.file=/etc/promtail/config.yml
    depends_on:
      - loki

  # Prometheus - Metrics collection
  prometheus:
    image: prom/prometheus:v2.45.0
    container_name: opencand_prometheus
    restart: unless-stopped
    ports:
      - "127.0.0.1:9090:9090"
      - "10.8.0.3:9090:9090"
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--web.console.libraries=/etc/prometheus/console_libraries"
      - "--web.console.templates=/etc/prometheus/consoles"
      - "--web.enable-lifecycle"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus-data:/prometheus
    depends_on:
      - node-exporter

  # Node Exporter - System metrics
  node-exporter:
    image: prom/node-exporter:v1.6.0
    container_name: opencand_node_exporter
    restart: unless-stopped
    ports:
      - "127.0.0.1:9100:9100"
      - "10.8.0.3:9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - "--path.procfs=/host/proc"
      - "--path.rootfs=/rootfs"
      - "--path.sysfs=/host/sys"
      - "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"

  # Grafana - Visualization and dashboards
  grafana:
    image: grafana/grafana:10.0.0
    container_name: opencand_grafana
    restart: unless-stopped
    ports:
      - "127.0.0.1:3000:3000"
      - "10.8.0.3:3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - grafana-data:/var/lib/grafana
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning
    depends_on:
      - loki
      - prometheus

volumes:
  loki-data:
  prometheus-data:
  grafana-data:
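A possible bring-up and smoke-check sequence for this compose file, run from the repository root, looks like the sketch below. The monitoring ports are published only on 127.0.0.1 (and 10.8.0.3), so the curl checks have to run on the host itself.

```bash
docker-compose up -d          # start db, api, frontend, etl and the monitoring stack
docker-compose ps             # confirm containers are up and the db healthcheck passes

curl -s http://localhost:3100/ready       # Loki readiness endpoint
curl -s http://localhost:9090/-/healthy   # Prometheus health endpoint
```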
monitoring/README.md · 149 lines · Normal file
@@ -0,0 +1,149 @@
# OpenCand Monitoring Setup

This monitoring stack provides comprehensive log aggregation and visualization for the OpenCand project, with a special focus on the ETL service.

## Services Overview

### 🔍 **Grafana Loki** (Port 3100)
- **Purpose**: Log aggregation and storage
- **Access**: http://localhost:3100
- **Description**: Collects and stores all container logs in a structured format

### 📊 **Grafana** (Port 3000)
- **Purpose**: Log visualization and dashboards
- **Access**: http://localhost:3000
- **Credentials**:
  - Username: `admin`
  - Password: `admin`
- **Pre-configured Dashboards**: OpenCand ETL Monitoring dashboard

### 📈 **Prometheus** (Port 9090)
- **Purpose**: Metrics collection and storage
- **Access**: http://localhost:9090
- **Description**: Collects system and application metrics

### 🖥️ **Node Exporter** (Port 9100)
- **Purpose**: System metrics collection
- **Access**: http://localhost:9100/metrics
- **Description**: Provides host system metrics (CPU, memory, disk, etc.)

### 🚚 **Promtail**
- **Purpose**: Log collection agent
- **Description**: Automatically discovers and ships Docker container logs to Loki

## Key Features

### ETL-Specific Monitoring
- ✅ Real-time ETL process logs
- ✅ Error tracking and alerting capabilities
- ✅ Performance metrics monitoring
- ✅ Data processing progress tracking

### Container Log Management
- ✅ Automatic log rotation (10MB max size, 3 files)
- ✅ Structured log labeling
- ✅ Multi-service log aggregation

### Pre-built Dashboards
- ✅ OpenCand ETL Logs viewer
- ✅ API logs monitoring
- ✅ Database logs tracking
- ✅ Container resource usage

## Getting Started

1. **Start the monitoring stack**:
   ```bash
   docker-compose up -d
   ```

2. **Access Grafana**:
   - Open http://localhost:3000
   - Log in with admin/admin
   - Navigate to "Dashboards" → "OpenCand ETL Monitoring"

3. **View ETL Logs in Real-time**:
   - In Grafana, go to "Explore"
   - Select "Loki" as the datasource
   - Use the query: `{container_name="opencand_etl"}`

4. **Monitor System Metrics**:
   - Access Prometheus at http://localhost:9090
   - View system metrics from Node Exporter

## Log Query Examples

### ETL Service Logs
```logql
{container_name="opencand_etl"}
```

### Error Logs Only
```logql
{container_name="opencand_etl"} |= "ERROR"
```

### API Logs with Filtering
```logql
{container_name="opencand_api"} |= "Microsoft.AspNetCore"
```

### Database Connection Logs
```logql
{container_name="opencand_db"} |= "connection"
```

## Configuration Files

- **Loki**: `./monitoring/loki-config.yaml`
- **Promtail**: `./monitoring/promtail-config.yaml`
- **Prometheus**: `./monitoring/prometheus.yml`
- **Grafana Datasources**: `./monitoring/grafana/provisioning/datasources/`
- **Grafana Dashboards**: `./monitoring/grafana/provisioning/dashboards/`

## Data Persistence

The following volumes are created for data persistence:
- `loki-data`: Loki log storage
- `prometheus-data`: Prometheus metrics storage
- `grafana-data`: Grafana dashboards and settings

## Troubleshooting

### ETL Logs Not Appearing
1. Check if the ETL container is running: `docker ps`
2. Verify Promtail is collecting logs: `docker logs opencand_promtail`
3. Check Loki status: `curl http://localhost:3100/ready`

### Grafana Dashboard Issues
1. Verify the datasources are configured correctly
2. Check if Loki is accessible from the Grafana container
3. Restart the Grafana container: `docker-compose restart grafana`

### Performance Issues
1. Monitor disk usage for log storage
2. Adjust log retention in `loki-config.yaml`
3. Increase resource limits if needed

## Customization

### Adding More Dashboards
1. Create JSON dashboard files in `./monitoring/grafana/provisioning/dashboards/`
2. Restart the Grafana container

### Log Retention Configuration
Edit `./monitoring/loki-config.yaml` to adjust retention policies:
```yaml
limits_config:
  retention_period: 168h  # 7 days
```

### Alert Configuration
Add alerting rules to the Prometheus configuration for ETL failure notifications; one possible starting point is sketched below.
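The following is a minimal sketch, not a shipped rule: the file name `monitoring/alert-rules.yml` and the rule itself are hypothetical. It uses the standard Prometheus `up` metric for the existing `opencand-api` scrape job; an ETL-specific alert would need a metric the ETL actually exports.

```bash
# Write a hypothetical rule file, then point rule_files in prometheus.yml at it.
cat > monitoring/alert-rules.yml <<'EOF'
groups:
  - name: opencand
    rules:
      - alert: OpenCandApiDown
        expr: up{job="opencand-api"} == 0
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "OpenCand API scrape target has been down for 5 minutes"
EOF
# Mount the file into the prometheus container, reference it under rule_files
# (currently commented out), then reload the config (lifecycle API is enabled):
curl -X POST http://localhost:9090/-/reload
```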
## Security Notes

- Change the default Grafana admin password in production
- Restrict network access to the monitoring ports
- Consider using authentication for external access
- Regularly update the monitoring stack images
monitoring/grafana/provisioning/dashboards/dashboard.yaml · 12 lines · Normal file
@@ -0,0 +1,12 @@
apiVersion: 1

providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: false
    updateIntervalSeconds: 10
    allowUiUpdates: true
    options:
      path: /etc/grafana/provisioning/dashboards
@@ -0,0 +1,197 @@
{
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": "-- Grafana --",
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "editable": true,
  "gnetId": null,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "panels": [
    {
      "datasource": "Loki",
      "fieldConfig": {
        "defaults": {
          "custom": {}
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 24,
        "x": 0,
        "y": 0
      },
      "id": 1,
      "options": {
        "showLabels": false,
        "showTime": false,
        "sortOrder": "Descending",
        "wrapLogMessage": false,
        "prettifyLogMessage": false,
        "enableLogDetails": true,
        "dedupStrategy": "none"
      },
      "targets": [
        {
          "expr": "{container_name=\"opencand_etl\"}",
          "refId": "A"
        }
      ],
      "title": "OpenCand ETL Logs",
      "type": "logs"
    },
    {
      "datasource": "Loki",
      "fieldConfig": {
        "defaults": {
          "custom": {}
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 0,
        "y": 8
      },
      "id": 2,
      "options": {
        "showLabels": false,
        "showTime": false,
        "sortOrder": "Descending",
        "wrapLogMessage": false,
        "prettifyLogMessage": false,
        "enableLogDetails": true,
        "dedupStrategy": "none"
      },
      "targets": [
        {
          "expr": "{container_name=\"opencand_api\"}",
          "refId": "A"
        }
      ],
      "title": "OpenCand API Logs",
      "type": "logs"
    },
    {
      "datasource": "Loki",
      "fieldConfig": {
        "defaults": {
          "custom": {}
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 8
      },
      "id": 3,
      "options": {
        "showLabels": false,
        "showTime": false,
        "sortOrder": "Descending",
        "wrapLogMessage": false,
        "prettifyLogMessage": false,
        "enableLogDetails": true,
        "dedupStrategy": "none"
      },
      "targets": [
        {
          "expr": "{container_name=\"opencand_db\"}",
          "refId": "A"
        }
      ],
      "title": "PostgreSQL Database Logs",
      "type": "logs"
    },
    {
      "datasource": "Prometheus",
      "fieldConfig": {
        "defaults": {
          "custom": {
            "align": null,
            "displayMode": "auto"
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              {
                "color": "green",
                "value": null
              },
              {
                "color": "red",
                "value": 80
              }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 24,
        "x": 0,
        "y": 16
      },
      "id": 4,
      "options": {
        "orientation": "auto",
        "reduceOptions": {
          "values": false,
          "calcs": [
            "lastNotNull"
          ],
          "fields": ""
        },
        "showThresholdLabels": false,
        "showThresholdMarkers": true,
        "text": {}
      },
      "pluginVersion": "7.5.7",
      "targets": [
        {
          "expr": "rate(container_cpu_usage_seconds_total{name=~\"opencand.*\"}[5m]) * 100",
          "interval": "",
          "legendFormat": "{{name}}",
          "refId": "A"
        }
      ],
      "title": "Container CPU Usage (%)",
      "type": "stat"
    }
  ],
  "schemaVersion": 27,
  "style": "dark",
  "tags": [
    "opencand",
    "etl",
    "logs"
  ],
  "templating": {
    "list": []
  },
  "time": {
    "from": "now-6h",
    "to": "now"
  },
  "timepicker": {},
  "timezone": "",
  "title": "OpenCand ETL Monitoring",
  "uid": "opencand-etl",
  "version": 1
}
monitoring/grafana/provisioning/datasources/datasources.yaml · 16 lines · Normal file
@@ -0,0 +1,16 @@
apiVersion: 1

datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    isDefault: false
    jsonData:
      maxLines: 1000

  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
monitoring/loki-config.yaml · 49 lines · Normal file
@@ -0,0 +1,49 @@
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://localhost:9093

# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
#
# Statistics help us better understand how Loki is used, and they show us performance
# levels for most users. This helps us prioritize features and documentation.
# For more information on what's sent: https://github.com/grafana/loki/blob/main/docs/sources/configuration/telemetry.md
# Refer to the buildReport method to see what goes into a report.
#
# If you would like to disable reporting, uncomment the following lines:
#analytics:
#  reporting_enabled: false
monitoring/prometheus.yml · 35 lines · Normal file
@@ -0,0 +1,35 @@
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']

  - job_name: 'opencand-api'
    static_configs:
      - targets: ['api:8080']
    metrics_path: '/metrics'
    scrape_interval: 30s

  - job_name: 'docker-containers'
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 5s
    relabel_configs:
      - source_labels: [__meta_docker_container_name]
        regex: '/(.*)'
        target_label: container_name
      - source_labels: [__meta_docker_container_id]
        target_label: container_id
      - source_labels: [__meta_docker_container_label_com_docker_compose_service]
        target_label: compose_service
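A quick way to confirm which of these scrape jobs Prometheus has actually picked up is the targets API. A sketch, assuming the compose stack is running and using only the local port published in docker-compose.yaml:

```bash
# List discovered targets and count them per job (grep/uniq instead of jq to avoid extra tooling).
curl -s http://localhost:9090/api/v1/targets | grep -o '"job":"[^"]*"' | sort | uniq -c
```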
monitoring/promtail-config.yaml · 71 lines · Normal file
@@ -0,0 +1,71 @@
server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://loki:3100/loki/api/v1/push

scrape_configs:
  # Docker container logs
  - job_name: containers
    static_configs:
      - targets:
          - localhost
        labels:
          job: containerlogs
          __path__: /var/lib/docker/containers/*/*log

    pipeline_stages:
      - json:
          expressions:
            output: log
            stream: stream
            attrs:
      - json:
          source: attrs
          expressions:
            tag:
      - regex:
          source: tag
          expression: (?P<container_name>(?:[^|]*))\|
      - timestamp:
          source: time
          format: RFC3339Nano
      - labels:
          stream:
          container_name:
      - output:
          source: output

  # ETL specific logs
  - job_name: etl-logs
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 5s
        filters:
          - name: label
            values: ["promtail.enable=true"]

    relabel_configs:
      - source_labels: ['__meta_docker_container_label_promtail_job']
        target_label: 'job'
      - source_labels: ['__meta_docker_container_name']
        regex: '/(.*)'
        target_label: 'container'
      - source_labels: ['__meta_docker_container_log_stream']
        target_label: 'stream'

    pipeline_stages:
      - json:
          expressions:
            output: log
            stream: stream
            timestamp: time
      - timestamp:
          source: timestamp
          format: RFC3339Nano
      - output:
          source: output
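Once Promtail is shipping logs through this config, the ETL stream can be queried straight from the Loki HTTP API as well as from Grafana. A sketch, using the local port that docker-compose.yaml publishes for Loki:

```bash
# Fetch the 20 most recent log lines for the ETL container via Loki's query_range endpoint.
curl -G -s "http://localhost:3100/loki/api/v1/query_range" \
  --data-urlencode 'query={container_name="opencand_etl"}' \
  --data-urlencode 'limit=20'
```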