Mirror of https://github.com/spring-projects/spring-petclinic.git, synced 2025-07-16 12:55:50 +00:00
Compare commits: 20 commits, 816751a4fa ... f64d5614d7
SHA1:
f64d5614d7
df676424e6
fff6bbf82f
823f41c08d
2e17504403
e4ba09c31c
ea7ba4762d
368ba55305
6e70bce0a7
6740a4eb67
76d87edf2c
21bda69797
8ea0831d9e
1affb0686e
5cbaec1bc9
4cc2a9557f
d0279e2b2a
f75b07374b
2ff0bfecc9
6233b03066
2 changed files with 220 additions and 78 deletions
@@ -1,11 +1,46 @@
-# Not actually used by the devcontainer, but it is used by gitpod
-ARG VARIANT=17-bullseye
-FROM mcr.microsoft.com/vscode/devcontainers/java:0-${VARIANT}
-ARG NODE_VERSION="none"
-RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi
-ARG USER=vscode
-VOLUME /home/$USER/.m2
-VOLUME /home/$USER/.gradle
-ARG JAVA_VERSION=17.0.7-ms
-RUN sudo mkdir /home/$USER/.m2 /home/$USER/.gradle && sudo chown $USER:$USER /home/$USER/.m2 /home/$USER/.gradle
-RUN bash -lc '. /usr/local/sdkman/bin/sdkman-init.sh && sdk install java $JAVA_VERSION && sdk use java $JAVA_VERSION'
+# Build stage
+FROM registry.access.redhat.com/ubi8/openjdk-17:latest as builder
+
+# Install required packages
+USER root
+RUN microdnf install -y tar gzip
+
+# Switch back to default user
+USER 1001
+
+WORKDIR /app
+
+# Copy Maven wrapper and POM
+COPY --chown=1001:0 .mvn/ .mvn/
+COPY --chown=1001:0 mvnw pom.xml ./
+RUN chmod +x mvnw
+
+# Create Checkstyle suppressions file
+RUN echo '<?xml version="1.0"?>' > checkstyle-suppressions.xml && \
+    echo '<!DOCTYPE suppressions PUBLIC "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN" "https://checkstyle.org/dtds/suppressions_1_2.dtd">' >> checkstyle-suppressions.xml && \
+    echo '<suppressions>' >> checkstyle-suppressions.xml && \
+    echo ' <suppress files=".*\.jar" checks="NoHttp"/>' >> checkstyle-suppressions.xml && \
+    echo ' <suppress files=".*\.pom" checks="NoHttp"/>' >> checkstyle-suppressions.xml && \
+    echo '</suppressions>' >> checkstyle-suppressions.xml
+
+# Download dependencies
+RUN ./mvnw dependency:go-offline -Dcheckstyle.suppressions.location=checkstyle-suppressions.xml
+
+# Copy source code
+COPY --chown=1001:0 src ./src
+
+# Build the application
+RUN ./mvnw package -DskipTests -Dcheckstyle.suppressions.location=checkstyle-suppressions.xml
+
+# Run stage
+FROM registry.access.redhat.com/ubi8/openjdk-17-runtime:latest
+
+WORKDIR /app
+
+# Copy the built artifact from builder stage
+COPY --from=builder /app/target/*.jar app.jar
+
+# Container runs on port 8080
+EXPOSE 8080
+
+# Set the startup command
+ENTRYPOINT ["java", "-jar", "app.jar"]
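For a quick local smoke test of the rebuilt multi-stage image, the sketch below builds it with the same -f .devcontainer/Dockerfile flag the workflow's docker-build step uses. The petclinic:local tag, the detached run, and the curl probe are illustrative assumptions, not part of the commit.

# Build the image from the repository root (path flag matches the workflow's docker-build step)
docker build -f .devcontainer/Dockerfile -t petclinic:local .

# Run it and probe the exposed port once Spring Boot has started
docker run --rm -d -p 8080:8080 --name petclinic petclinic:local
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8080
docker stop petclinic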
.github/workflows/pipeline.yml (vendored): 241 changed lines
@@ -2,59 +2,66 @@ name: Enhanced Java Application Pipeline with Metrics
 
 on:
   push:
-    branches: [ main ]
+    branches: [ pipeline-optimization ]
   pull_request:
-    branches: [ main ]
+    branches: [ pipeline-optimization ]
 
 jobs:
   build-with-metrics:
     runs-on: ubuntu-latest
+    timeout-minutes: 60
 
     services:
       prometheus:
         image: prom/prometheus:latest
         ports:
           - 9090:9090
-        healthcheck:
-          test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
-          interval: 10s
-          timeout: 5s
-          retries: 3
+        options: >-
+          --health-cmd "wget -q -O- http://localhost:9090/-/healthy || exit 1"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 3
 
       pushgateway:
         image: prom/pushgateway:latest
         ports:
           - 9091:9091
-        healthcheck:
-          test: ["CMD", "wget", "-q", "--spider", "http://localhost:9091/-/healthy"]
-          interval: 10s
-          timeout: 5s
-          retries: 3
+        options: >-
+          --health-cmd "wget -q -O- http://localhost:9091/-/healthy || exit 1"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 3
 
     steps:
     - uses: actions/checkout@v4
 
-    # Installation and setup of monitoring tools
     - name: Setup monitoring tools
+      id: setup-monitoring
+      timeout-minutes: 5
       run: |
-        sudo apt-get update
-        sudo apt-get install -y powerstat linux-tools-common linux-tools-generic
-        sudo snap install powerapi
-        curl -L https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz -o node_exporter.tar.gz
-        tar xvfz node_exporter.tar.gz
+        set -eo pipefail
+
+        echo "::group::Installing system packages"
+        sudo apt-get update || (echo "Failed to update package lists" && exit 1)
+        sudo apt-get install -y powerstat linux-tools-common linux-tools-generic || (echo "Failed to install powerstat and linux tools" && exit 1)
+        echo "::endgroup::"
+
+        echo "::group::Setting up node exporter"
+        curl -L --retry 3 https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz -o node_exporter.tar.gz || (echo "Failed to download node exporter" && exit 1)
+        tar xvfz node_exporter.tar.gz || (echo "Failed to extract node exporter" && exit 1)
+        echo "::endgroup::"
 
-    # Start monitoring tools with improved configuration
-    - name: Start monitoring
-      run: |
-        # Start PowerAPI with Prometheus output
-        sudo powerapi --pid $$ --frequency 1000 --output prometheus --pushgateway-url http://localhost:9091/metrics/job/powerapi &
-        echo "POWERAPI_PID=$!" >> $GITHUB_ENV
+    - name: Start monitoring
+      id: start-monitoring
+      timeout-minutes: 2
+      run: |
+        set -eo pipefail
 
-        # Start node exporter
         ./node_exporter-*/node_exporter --web.listen-address=":9100" &
         echo "NODE_EXPORTER_PID=$!" >> $GITHUB_ENV
 
-        # Create start timestamp file
+        timeout 30s bash -c 'until curl -s http://localhost:9100/metrics > /dev/null; do sleep 1; done' || (echo "Node exporter failed to start" && exit 1)
+
         date +%s%N > pipeline_start_time.txt
 
     - name: Set up JDK 17
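The options: >- flags above replace the healthcheck: keys (which the Actions services: syntax does not recognize) with Docker-level health checks. As a manual check outside the workflow, an assumption for illustration, the same probes can be run against the two service containers:

# Same probes the --health-cmd flags use; both images expose a /-/healthy endpoint
wget -q -O- http://localhost:9090/-/healthy && echo "Prometheus healthy"
wget -q -O- http://localhost:9091/-/healthy && echo "Pushgateway healthy"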
@@ -65,88 +72,188 @@ jobs:
         cache: maven
 
     - name: Build with Maven
+      id: build
+      timeout-minutes: 15
+      env:
+        MAVEN_OPTS: "-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn"
       run: |
+        set -eo pipefail
+
+        echo "Creating checkstyle suppressions file..."
+        cat > checkstyle-suppressions.xml << 'EOF'
+        <?xml version="1.0"?>
+        <!DOCTYPE suppressions PUBLIC
+          "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN"
+          "https://checkstyle.org/dtds/suppressions_1_2.dtd">
+        <suppressions>
+          <suppress files="node_exporter.*" checks="NoHttp"/>
+        </suppressions>
+        EOF
+
+        echo "Modifying checkstyle configuration..."
+        if [ -f "src/checkstyle/nohttp-checkstyle.xml" ]; then
+          sed -i '/<module name="Checker">/a \ <module name="SuppressionFilter">\n <property name="file" value="${config_loc}/checkstyle-suppressions.xml"/>\n </module>' src/checkstyle/nohttp-checkstyle.xml
+        fi
+
+        echo "Starting Maven build..."
         start_time=$(date +%s%N)
-        ./mvnw -B verify
+
+        ./mvnw -B verify \
+          -Dcheckstyle.config.location=src/checkstyle/nohttp-checkstyle.xml \
+          -Dcheckstyle.suppressions.location=checkstyle-suppressions.xml
+
+        build_status=$?
         end_time=$(date +%s%N)
         echo "BUILD_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
+
+        if [ $build_status -ne 0 ]; then
+          echo "::error::Maven build failed with status $build_status"
+          exit $build_status
+        fi
 
     - name: Run tests
+      id: test
+      if: success() || failure()
+      timeout-minutes: 20
       run: |
+        set -eo pipefail
         start_time=$(date +%s%N)
         ./mvnw test
+        test_status=$?
         end_time=$(date +%s%N)
         echo "TEST_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
+        exit $test_status
 
     - name: Build Docker image
+      id: docker-build
+      if: success()
+      timeout-minutes: 10
       run: |
+        set -eo pipefail
         start_time=$(date +%s%N)
-        docker build -t app:latest .
+
+        docker build -t app:latest -f .devcontainer/Dockerfile . --no-cache
+        build_status=$?
         end_time=$(date +%s%N)
         echo "DOCKER_BUILD_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
+        exit $build_status
 
     - name: Setup Kubernetes
+      id: k8s-setup
+      if: success()
       uses: helm/kind-action@v1
+      with:
+        wait: 120s
 
     - name: Deploy to Kubernetes
+      id: deploy
+      if: success()
+      timeout-minutes: 10
       run: |
+        set -eo pipefail
         start_time=$(date +%s%N)
-        kubectl apply -f k8s/
-        kubectl wait --for=condition=ready pod -l app=petclinic --timeout=180s
+        kubectl apply -f k8s/ || (echo "Failed to apply Kubernetes manifests" && exit 1)
+
+        if ! kubectl wait --for=condition=ready pod -l app=petclinic --timeout=180s; then
+          echo "::error::Deployment failed - collecting debug information"
+          kubectl describe pods -l app=petclinic
+          kubectl logs -l app=petclinic --all-containers=true
+          exit 1
+        fi
+
         end_time=$(date +%s%N)
         echo "DEPLOY_TIME=$((($end_time - $start_time)/1000000))" >> $GITHUB_ENV
 
-    # Export metrics with improved labeling and job naming
     - name: Export metrics to Prometheus
-      run: |
-        # Export timing metrics with descriptive labels
-        echo "pipeline_build_duration_ms{stage=\"build\",project=\"petclinic\"} ${{ env.BUILD_TIME }}" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-        echo "pipeline_test_duration_ms{stage=\"test\",project=\"petclinic\"} ${{ env.TEST_TIME }}" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-        echo "pipeline_docker_build_duration_ms{stage=\"docker-build\",project=\"petclinic\"} ${{ env.DOCKER_BUILD_TIME }}" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-        echo "pipeline_deploy_duration_ms{stage=\"deploy\",project=\"petclinic\"} ${{ env.DEPLOY_TIME }}" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-
-        # Export power consumption metrics
-        while IFS=, read -r timestamp watts; do
-          echo "power_consumption_watts{project=\"petclinic\"} $watts $timestamp" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-        done < energy_metrics.csv
-
-    # Collect additional resource metrics
-    - name: Collect resource metrics
-      run: |
-        # Memory usage metric
-        echo "pipeline_memory_usage_bytes{project=\"petclinic\"} $(free -b | grep Mem: | awk '{print $3}')" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-
-        # CPU usage metric
-        echo "pipeline_cpu_usage_percent{project=\"petclinic\"} $(top -bn1 | grep "Cpu(s)" | awk '{print $2}')" | curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline
-
-    # Stop monitoring tools and collect metrics
-    - name: Collect metrics
       if: always()
+      timeout-minutes: 5
       run: |
-        # End timestamp
+        set -eo pipefail
+
+        export_metric() {
+          local metric_name=$1
+          local metric_value=$2
+          local stage=$3
+
+          if [ -n "$metric_value" ]; then
+            echo "${metric_name}{stage=\"${stage}\",project=\"petclinic\"} ${metric_value}" | \
+              curl --retry 3 --retry-delay 2 --max-time 10 --silent --show-error \
+              --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline || \
+              echo "::warning::Failed to export ${metric_name} for ${stage}"
+          fi
+        }
+
+        export_metric "pipeline_build_duration_ms" "${BUILD_TIME}" "build"
+        export_metric "pipeline_test_duration_ms" "${TEST_TIME}" "test"
+        export_metric "pipeline_docker_build_duration_ms" "${DOCKER_BUILD_TIME}" "docker-build"
+        export_metric "pipeline_deploy_duration_ms" "${DEPLOY_TIME}" "deploy"
+
+    - name: Collect resource metrics
+      if: always()
+      timeout-minutes: 2
+      run: |
+        set -eo pipefail
+
+        export_metric() {
+          local metric_name=$1
+          local metric_value=$2
+          local stage=$3
+
+          if [ -n "$metric_value" ]; then
+            echo "${metric_name}{stage=\"${stage}\",project=\"petclinic\"} ${metric_value}" | \
+              curl --retry 3 --retry-delay 2 --max-time 10 --silent --show-error \
+              --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline || \
+              echo "::warning::Failed to export ${metric_name} for ${stage}"
+          fi
+        }
+
+        mem_usage=$(free -b | grep Mem: | awk '{print $3}') || echo "::warning::Failed to collect memory usage"
+        if [ -n "$mem_usage" ]; then
+          export_metric "pipeline_memory_usage_bytes" "$mem_usage" "memory"
+        fi
+
+        cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}') || echo "::warning::Failed to collect CPU usage"
+        if [ -n "$cpu_usage" ]; then
+          export_metric "pipeline_cpu_usage_percent" "$cpu_usage" "cpu"
+        fi
+
+    - name: Collect final metrics
+      if: always()
+      timeout-minutes: 5
+      run: |
+        set -eo pipefail
+
         date +%s%N > pipeline_end_time.txt
 
-        # Stop PowerAPI
-        sudo kill ${{ env.POWERAPI_PID }}
-
-        # Stop node exporter
-        kill ${{ env.NODE_EXPORTER_PID }}
-
-        # Collect system metrics
-        top -b -n 1 > system_metrics.txt
-        free -m > memory_metrics.txt
-        df -h > disk_metrics.txt
+        if [ -n "$NODE_EXPORTER_PID" ]; then
+          kill $NODE_EXPORTER_PID || echo "::warning::Failed to stop node exporter"
+        fi
+
+        {
+          echo "=== System Resources ===" > system_metrics.txt
+          top -b -n 1 >> system_metrics.txt
+        } || echo "::warning::Failed to collect top metrics"
+
+        {
+          echo "=== Memory Usage ===" > memory_metrics.txt
+          free -m >> memory_metrics.txt
+        } || echo "::warning::Failed to collect memory metrics"
+
+        {
+          echo "=== Disk Usage ===" > disk_metrics.txt
+          df -h >> disk_metrics.txt
+        } || echo "::warning::Failed to collect disk metrics"
 
-    # Save metrics as artifacts
     - name: Save metrics
       if: always()
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
       with:
         name: pipeline-metrics
         path: |
-          energy_metrics.csv
           system_metrics.txt
           memory_metrics.txt
           disk_metrics.txt
           pipeline_start_time.txt
           pipeline_end_time.txt
+        retention-days: 90
+        if-no-files-found: warn
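A small sketch of the exposition format the export_metric helper pushes, and how to confirm a push landed; the 123456 value and the manual curl are illustrative stand-ins, not taken from the workflow.

# Push one sample the same way export_metric does (value is a stand-in)
echo 'pipeline_build_duration_ms{stage="build",project="petclinic"} 123456' | \
  curl --data-binary @- http://localhost:9091/metrics/job/petclinic-pipeline

# The Pushgateway re-exposes everything it holds on /metrics, which is also
# what the Prometheus service container can scrape; grep confirms the push
curl -s http://localhost:9091/metrics | grep pipeline_build_duration_ms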
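Since the start and end timestamps are written with date +%s%N (nanoseconds) and uploaded as artifacts, total pipeline wall time can be derived from them after download. This post-processing snippet is a sketch, not part of the workflow itself.

# Convert the two nanosecond timestamps into a duration in milliseconds,
# matching the unit used for BUILD_TIME, TEST_TIME, and the other metrics
start=$(cat pipeline_start_time.txt)
end=$(cat pipeline_end_time.txt)
echo "pipeline total: $(( (end - start) / 1000000 )) ms"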