Mirror of https://github.com/spring-projects/spring-petclinic.git, synced 2025-07-16 12:55:50 +00:00

Compare commits: 10 commits, f64d5614d7 ... 47fae9625c
Commits in this range (author and date columns omitted):

- 47fae9625c
- 62d9fde5a1
- 81e3895c0e
- da5cb7e45d
- b8055c3c2b
- 3e9be926fc
- f5d2b0b0aa
- 70d389f67d
- d48a81ce7a
- fa459ba682
1 changed file with 231 additions and 182 deletions
.github/workflows/pipeline.yml (vendored): 413 lines changed
@@ -1,4 +1,4 @@
-name: Enhanced Java Application Pipeline with Metrics
+name: Enhanced Java Application Pipeline with Energy Monitoring
 
 on:
   push:
@@ -11,58 +11,163 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
 
     services:
       prometheus:
         image: prom/prometheus:latest
         ports:
           - 9090:9090
         options: >-
           --health-cmd "wget -q -O- http://localhost:9090/-/healthy || exit 1"
           --health-interval 10s
           --health-timeout 5s
           --health-retries 3
 
       pushgateway:
         image: prom/pushgateway:latest
         ports:
           - 9091:9091
         options: >-
           --health-cmd "wget -q -O- http://localhost:9091/-/healthy || exit 1"
           --health-interval 10s
           --health-timeout 5s
           --health-retries 3
 
     steps:
       - uses: actions/checkout@v4
 
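Note: both service containers gate the job on the standard Prometheus-style health endpoint. The same probe that `--health-cmd` runs inside each container can be issued from the runner's side against the mapped ports; a minimal sketch:

```bash
# Manual equivalent of the container health checks (ports mapped above).
curl -fsS http://localhost:9090/-/healthy && echo "prometheus healthy"
curl -fsS http://localhost:9091/-/healthy && echo "pushgateway healthy"
```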
-      - name: Setup monitoring tools
-        id: setup-monitoring
-        timeout-minutes: 5
-        run: |
-          set -eo pipefail
-
-          echo "::group::Installing system packages"
-          sudo apt-get update || (echo "Failed to update package lists" && exit 1)
-          sudo apt-get install -y powerstat linux-tools-common linux-tools-generic || (echo "Failed to install powerstat and linux tools" && exit 1)
-          echo "::endgroup::"
-
-          echo "::group::Setting up node exporter"
-          curl -L --retry 3 https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz -o node_exporter.tar.gz || (echo "Failed to download node exporter" && exit 1)
-          tar xvfz node_exporter.tar.gz || (echo "Failed to extract node exporter" && exit 1)
-          echo "::endgroup::"
+      - name: Setup Rust and Scaphandre
+        run: |
+          # Install Rust if necessary
+          if ! command -v cargo &> /dev/null; then
+            curl https://sh.rustup.rs -sSf | sh -s -- -y
+            source "$HOME/.cargo/env"
+          fi
+
+          # Install Scaphandre via Cargo
+          cargo install scaphandre
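Note: a quick smoke test of the freshly installed binary can catch a broken toolchain before the build steps run. A minimal sketch; the `json` subcommand and `-t` flag are the same ones the pipeline uses below, while running under `sudo` is an assumption about runner permissions, since Scaphandre reads RAPL power counters:

```bash
# Verify the cargo-installed binary is on PATH and can emit one short sample.
source "$HOME/.cargo/env"
sudo "$(command -v scaphandre)" json -t 5 | head -c 200
```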
-      - name: Start monitoring
-        id: start-monitoring
-        timeout-minutes: 2
-        run: |
-          set -eo pipefail
-
-          ./node_exporter-*/node_exporter --web.listen-address=":9100" &
-          echo "NODE_EXPORTER_PID=$!" >> $GITHUB_ENV
-
-          timeout 30s bash -c 'until curl -s http://localhost:9100/metrics > /dev/null; do sleep 1; done' || (echo "Node exporter failed to start" && exit 1)
+      - name: Setup directories and install dependencies
+        run: |
+          set -eo pipefail
+
+          # Create the directory structure
+          mkdir -p metrics/system
+          mkdir -p metrics/power
+          mkdir -p metrics/performance
+
+          # Install the required packages
+          sudo apt-get update
+          sudo apt-get install -y \
+            linux-tools-common \
+            linux-tools-generic \
+            python3-pip \
+            python3-psutil
+
+          # Install Python dependencies
+          pip3 install pandas numpy
+      - name: Create energy monitoring script
+        run: |
+          cat > energy_monitor.py << 'EOL'
+          import subprocess
+          import csv
+          import os
+          import sys
+          import time
+          import json
+          from datetime import datetime
+
+          def monitor_energy(command, output_file):
+              # Create the output directory if necessary
+              os.makedirs(os.path.dirname(output_file), exist_ok=True)
+
+              # Prepare the CSV file
+              with open(output_file, 'w', newline='') as csvfile:
+                  writer = csv.writer(csvfile)
+                  writer.writerow([
+                      'Timestamp',
+                      'Power_Watts',
+                      'Component'
+                  ])
+
+              # Scaphandre command for JSON output
+              scaphandre_cmd = [
+                  'scaphandre', 'json',
+                  '-t', '120'  # two-minute maximum timeout
+              ]
+
+              # Start monitoring in the background
+              monitor_process = subprocess.Popen(
+                  scaphandre_cmd,
+                  stdout=subprocess.PIPE,
+                  stderr=subprocess.PIPE,
+                  universal_newlines=True
+              )
+
+              try:
+                  # Give Scaphandre a moment to start up
+                  time.sleep(2)
+
+                  # Run the main command
+                  main_process = subprocess.Popen(command, shell=True)
+                  main_process.wait()
+
+                  # Wait for Scaphandre's data and process it
+                  try:
+                      monitor_output, _ = monitor_process.communicate(timeout=10)
+                      process_scaphandre_output(monitor_output, output_file)
+                  except subprocess.TimeoutExpired:
+                      print("Scaphandre monitoring timed out", file=sys.stderr)
+
+              finally:
+                  # Stop monitoring
+                  monitor_process.terminate()
+                  monitor_process.wait()
+
+          def process_scaphandre_output(output, output_file):
+              try:
+                  # Split the JSON stream into individual objects
+                  json_objects = output.strip().split('\n')
+
+                  with open(output_file, 'a', newline='') as csvfile:
+                      writer = csv.writer(csvfile)
+
+                      for json_str in json_objects:
+                          try:
+                              data = json.loads(json_str)
+
+                              # Extract the relevant fields
+                              timestamp = data.get('timestamp', datetime.now().isoformat())
+                              power = data.get('power', {}).get('total_power', 0)
+
+                              writer.writerow([
+                                  timestamp,
+                                  power,
+                                  'System'
+                              ])
+                          except json.JSONDecodeError:
+                              print(f"Could not parse JSON: {json_str}", file=sys.stderr)
+              except Exception as e:
+                  print(f"Error processing Scaphandre output: {e}", file=sys.stderr)
+
+          def main():
+              if len(sys.argv) < 3:
+                  print("Usage: python energy_monitor.py 'command' output_file.csv")
+                  sys.exit(1)
+
+              command = sys.argv[1]
+              output_file = sys.argv[2]
+
+              monitor_energy(command, output_file)
+
+          if __name__ == '__main__':
+              main()
+          EOL
+
+          chmod +x energy_monitor.py
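Note: the script takes the command to profile as a single quoted argument, plus a CSV destination. A minimal local sketch, assuming Scaphandre is installed and the `metrics/power` directory layout from the earlier step:

```bash
# Profile a short build and inspect the first CSV rows (header plus samples).
python3 energy_monitor.py "./mvnw -q -DskipTests package" metrics/power/sample.csv
head -n 3 metrics/power/sample.csv
```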
       - name: Cache Maven packages
         uses: actions/cache@v3
         with:
           path: ~/.m2
           key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
           restore-keys: ${{ runner.os }}-m2
 
-          date +%s%N > pipeline_start_time.txt
+      - name: Collect initial system metrics
+        run: |
+          set -eo pipefail
+
+          # Record the start time
+          date +%s%N > metrics/pipeline_start_time.txt
+
+          # Collect the initial metrics
+          echo "=== Initial System Resources ===" > metrics/system/initial_metrics.txt
+          top -b -n 1 >> metrics/system/initial_metrics.txt
+
+          echo "=== Initial Memory Usage ===" > metrics/system/initial_memory.txt
+          free -m >> metrics/system/initial_memory.txt
+
+          echo "=== Initial Disk Usage ===" > metrics/system/initial_disk.txt
+          df -h >> metrics/system/initial_disk.txt
 
       - name: Set up JDK 17
         uses: actions/setup-java@v4
@@ -71,189 +176,133 @@
           distribution: 'adopt'
           cache: maven
 
-      - name: Build with Maven
+      - name: Build with Maven and measure energy
         id: build
         timeout-minutes: 15
         env:
-          MAVEN_OPTS: "-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn"
+          MAVEN_OPTS: "-Xmx2048m -XX:+TieredCompilation -XX:TieredStopAtLevel=1"
         run: |
           set -eo pipefail
 
-          echo "Creating checkstyle suppressions file..."
-          cat > checkstyle-suppressions.xml << 'EOF'
-          <?xml version="1.0"?>
-          <!DOCTYPE suppressions PUBLIC
-            "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN"
-            "https://checkstyle.org/dtds/suppressions_1_2.dtd">
-          <suppressions>
-            <suppress files="node_exporter.*" checks="NoHttp"/>
-          </suppressions>
-          EOF
-
-          echo "Modifying checkstyle configuration..."
-          if [ -f "src/checkstyle/nohttp-checkstyle.xml" ]; then
-            sed -i '/<module name="Checker">/a \ <module name="SuppressionFilter">\n    <property name="file" value="${config_loc}/checkstyle-suppressions.xml"/>\n  </module>' src/checkstyle/nohttp-checkstyle.xml
-          fi
-
-          echo "Starting Maven build..."
+          # Add Cargo and Scaphandre to the PATH
+          source "$HOME/.cargo/env"
+
           start_time=$(date +%s%N)
-          ./mvnw -B verify \
-            -Dcheckstyle.config.location=src/checkstyle/nohttp-checkstyle.xml \
-            -Dcheckstyle.suppressions.location=checkstyle-suppressions.xml
+
+          # Collect pre-build metrics
+          free -m > metrics/system/pre_build_memory.txt
+
+          # Energy monitoring with Scaphandre
+          python3 energy_monitor.py \
+            "./mvnw -B verify -Dmaven.test.skip=true -Dcheckstyle.skip=true -T 1C" \
+            metrics/power/build_power_metrics.csv
 
           build_status=$?
           end_time=$(date +%s%N)
           echo "BUILD_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
 
-          if [ $build_status -ne 0 ]; then
-            echo "::error::Maven build failed with status $build_status"
-            exit $build_status
-          fi
+          # Collect post-build metrics
+          free -m > metrics/system/post_build_memory.txt
+
+          # Record the build time
+          echo "$((($end_time - $start_time)/1000000))" > metrics/performance/build_time.txt
+
+          exit $build_status
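Note: `date +%s%N` prints nanoseconds since the epoch, so the `/1000000` in the `BUILD_TIME`, `TEST_TIME`, and `DOCKER_BUILD_TIME` expressions converts the delta to milliseconds. A standalone sketch of the same arithmetic:

```bash
# Time a 250 ms sleep the same way the pipeline times its stages.
start_time=$(date +%s%N)
sleep 0.25
end_time=$(date +%s%N)
echo "elapsed_ms=$(( (end_time - start_time) / 1000000 ))"  # prints roughly 250
```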
-      - name: Run tests
+      - name: Run tests with energy monitoring
         id: test
-        if: success() || failure()
+        if: success()
         timeout-minutes: 20
         run: |
           set -eo pipefail
 
+          # Add Cargo and Scaphandre to the PATH
+          source "$HOME/.cargo/env"
+
           start_time=$(date +%s%N)
-          ./mvnw test
+
+          # Collect pre-test metrics
+          free -m > metrics/system/pre_test_memory.txt
+
+          # Energy monitoring with Scaphandre
+          python3 energy_monitor.py \
+            "./mvnw test -T 1C" \
+            metrics/power/test_power_metrics.csv
 
           test_status=$?
           end_time=$(date +%s%N)
           echo "TEST_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
 
+          # Collect post-test metrics
+          free -m > metrics/system/post_test_memory.txt
+
+          # Record the test time
+          echo "$((($end_time - $start_time)/1000000))" > metrics/performance/test_time.txt
+
           exit $test_status
-      - name: Build Docker image
+      - name: Build Docker image with energy monitoring
         id: docker-build
         if: success()
         timeout-minutes: 10
         run: |
           set -eo pipefail
 
+          # Add Cargo and Scaphandre to the PATH
+          source "$HOME/.cargo/env"
+
           start_time=$(date +%s%N)
 
-          docker build -t app:latest -f .devcontainer/Dockerfile . --no-cache
+          # Collect pre-Docker metrics
+          free -m > metrics/system/pre_docker_memory.txt
+          df -h > metrics/system/pre_docker_disk.txt
+
+          # Energy monitoring with Scaphandre
+          python3 energy_monitor.py \
+            "docker build -t app:latest -f .devcontainer/Dockerfile . --no-cache" \
+            metrics/power/docker_build_power_metrics.csv
 
           build_status=$?
           end_time=$(date +%s%N)
           echo "DOCKER_BUILD_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
 
+          # Collect post-Docker metrics
+          free -m > metrics/system/post_docker_memory.txt
+          df -h > metrics/system/post_docker_disk.txt
+
+          # Record the Docker build time
+          echo "$((($end_time - $start_time)/1000000))" > metrics/performance/docker_time.txt
+
+          # Record the image size
+          docker images app:latest --format "{{.Size}}" > metrics/performance/docker_image_size.txt
+
           exit $build_status
       - name: Setup Kubernetes
         id: k8s-setup
         if: success()
         uses: helm/kind-action@v1
         with:
           wait: 120s
 
       - name: Deploy to Kubernetes
         id: deploy
         if: success()
         timeout-minutes: 10
         run: |
           set -eo pipefail
           start_time=$(date +%s%N)
           kubectl apply -f k8s/ || (echo "Failed to apply Kubernetes manifests" && exit 1)
 
           if ! kubectl wait --for=condition=ready pod -l app=petclinic --timeout=180s; then
             echo "::error::Deployment failed - collecting debug information"
             kubectl describe pods -l app=petclinic
             kubectl logs -l app=petclinic --all-containers=true
             exit 1
           fi
 
           end_time=$(date +%s%N)
           echo "DEPLOY_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
 
-      - name: Export metrics to Prometheus
+      - name: Collect final system metrics
         if: always()
         timeout-minutes: 5
         run: |
           set -eo pipefail
 
-          export_metric() {
-            local metric_name=$1
-            local metric_value=$2
-            local stage=$3
-
-            if [ -n "$metric_value" ]; then
-              echo "${metric_name}{stage=\"${stage}\",project=\"petclinic\"} ${metric_value}" | \
-                curl --retry 3 --retry-delay 2 --max-time 10 --silent --show-error \
-                --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline || \
-                echo "::warning::Failed to export ${metric_name} for ${stage}"
-            fi
-          }
-
-          export_metric "pipeline_build_duration_ms" "${BUILD_TIME}" "build"
-          export_metric "pipeline_test_duration_ms" "${TEST_TIME}" "test"
-          export_metric "pipeline_docker_build_duration_ms" "${DOCKER_BUILD_TIME}" "docker-build"
-          export_metric "pipeline_deploy_duration_ms" "${DEPLOY_TIME}" "deploy"
-
-      - name: Collect resource metrics
-        if: always()
-        timeout-minutes: 2
-        run: |
-          set -eo pipefail
-
-          export_metric() {
-            local metric_name=$1
-            local metric_value=$2
-            local stage=$3
-
-            if [ -n "$metric_value" ]; then
-              echo "${metric_name}{stage=\"${stage}\",project=\"petclinic\"} ${metric_value}" | \
-                curl --retry 3 --retry-delay 2 --max-time 10 --silent --show-error \
-                --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline || \
-                echo "::warning::Failed to export ${metric_name} for ${stage}"
-            fi
-          }
-
-          mem_usage=$(free -b | grep Mem: | awk '{print $3}') || echo "::warning::Failed to collect memory usage"
-          if [ -n "$mem_usage" ]; then
-            export_metric "pipeline_memory_usage_bytes" "$mem_usage" "memory"
-          fi
-
-          cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}') || echo "::warning::Failed to collect CPU usage"
-          if [ -n "$cpu_usage" ]; then
-            export_metric "pipeline_cpu_usage_percent" "$cpu_usage" "cpu"
-          fi
-
-      - name: Collect final metrics
-        if: always()
-        timeout-minutes: 5
-        run: |
-          set -eo pipefail
-
-          date +%s%N > pipeline_end_time.txt
-
-          if [ -n "$NODE_EXPORTER_PID" ]; then
-            kill $NODE_EXPORTER_PID || echo "::warning::Failed to stop node exporter"
-          fi
-
-          {
-            echo "=== System Resources ===" > system_metrics.txt
-            top -b -n 1 >> system_metrics.txt
-          } || echo "::warning::Failed to collect top metrics"
-
-          {
-            echo "=== Memory Usage ===" > memory_metrics.txt
-            free -m >> memory_metrics.txt
-          } || echo "::warning::Failed to collect memory metrics"
-
-          {
-            echo "=== Disk Usage ===" > disk_metrics.txt
-            df -h >> disk_metrics.txt
-          } || echo "::warning::Failed to collect disk metrics"
+          # Collect the final system metrics
+          echo "=== Final System Resources ===" > metrics/system/final_metrics.txt
+          top -b -n 1 >> metrics/system/final_metrics.txt || echo "Failed to collect top metrics"
+
+          echo "=== Final Memory Usage ===" > metrics/system/final_memory.txt
+          free -m >> metrics/system/final_memory.txt || echo "Failed to collect memory metrics"
+
+          echo "=== Final Disk Usage ===" > metrics/system/final_disk.txt
+          df -h >> metrics/system/final_disk.txt || echo "Failed to collect disk metrics"
+
+          # Mark the end of the pipeline
+          date +%s%N > metrics/pipeline_end_time.txt
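Note: the removed `export_metric` helper pushed plain Prometheus exposition-format lines to the Pushgateway service container. An equivalent standalone one-liner, with an illustrative value of 4321 in place of a real stage duration:

```bash
# Push one gauge sample to the pushgateway job the old pipeline used.
echo 'pipeline_build_duration_ms{stage="build",project="petclinic"} 4321' |
  curl --silent --show-error --data-binary @- \
    http://localhost:9091/metrics/job/petclinic-pipeline
```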
       - name: Save metrics
         if: always()
         uses: actions/upload-artifact@v4
         with:
           name: pipeline-metrics
-          path: |
-            system_metrics.txt
-            memory_metrics.txt
-            disk_metrics.txt
-            pipeline_start_time.txt
-            pipeline_end_time.txt
+          path: metrics/
           retention-days: 90
           if-no-files-found: warn
 
       - name: Cleanup
         if: always()
         run: |
           docker system prune -af