Merged: changes from all commits
@@ -94,7 +94,7 @@ jobs:
 consistency="TRUE"
 build_yamls=$(find . -name 'build.yaml')
 for build_yaml in $build_yamls; do
-  message=$(python3 .github/workflows/scripts/check-name-agreement.py "$build_yaml")
+  message=$(python3 .github/workflows/scripts/check_name_agreement.py "$build_yaml")
   if [[ "$message" != *"consistent"* ]]; then
     consistency="FALSE"
     echo "Inconsistent service name and image name found in file $build_yaml."
@@ -19,8 +19,10 @@ def check_service_image_consistency(data):
         image_name = service_details.get("image", "")
         # Extract the image name part after the last '/'
         image_name_part = image_name.split("/")[-1].split(":")[0]
+        # Remove '-openeuler' suffix if it exists
+        fixed_service_name = service_name.rsplit("-openeuler", 1)[0]
         # Check if the service name is a substring of the image name part
-        if service_name not in image_name_part:
+        if fixed_service_name not in image_name_part:
             # Get the line number of the service name
             line_number = service_details.lc.line + 1
             inconsistencies.append((service_name, image_name, line_number))
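For context, a minimal self-contained sketch of the adjusted check. The enclosing loop and the ruamel.yaml round-trip loader are assumptions inferred from the .lc.line attribute and the services lookup; only the suffix-stripping logic is confirmed by the diff above:

# Hypothetical reconstruction for illustration; the real script's argument
# handling and output formatting are not shown in this diff.
from ruamel.yaml import YAML

def check_service_image_consistency(data):
    inconsistencies = []
    for service_name, service_details in data.get("services", {}).items():
        image_name = service_details.get("image", "")
        # Extract the image name part after the last '/'
        image_name_part = image_name.split("/")[-1].split(":")[0]
        # "chatqna-openeuler" -> "chatqna", so the service can match an
        # image such as ${REGISTRY}/chatqna:${TAG}-openeuler
        fixed_service_name = service_name.rsplit("-openeuler", 1)[0]
        if fixed_service_name not in image_name_part:
            line_number = service_details.lc.line + 1  # needs round-trip load
            inconsistencies.append((service_name, image_name, line_number))
    return inconsistencies

yaml = YAML()  # round-trip mode preserves line numbers via .lc
with open("build.yaml") as f:
    print(check_service_image_consistency(yaml.load(f)))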
2 changes: 1 addition & 1 deletion .github/workflows/scripts/codeScan/hadolint.sh
@@ -7,7 +7,7 @@ source /GenAIExamples/.github/workflows/scripts/change_color
 log_dir=/GenAIExamples/.github/workflows/scripts/codeScan
 ERROR_WARN=false
 
-find . -type f \( -name "Dockerfile*" \) -print -exec hadolint --ignore DL3006 --ignore DL3007 --ignore DL3008 --ignore DL3013 --ignore DL3018 --ignore DL3016 {} \; > ${log_dir}/hadolint.log
+find . -type f \( -name "Dockerfile*" \) -print -exec hadolint --ignore DL3033 --ignore DL3006 --ignore DL3007 --ignore DL3008 --ignore DL3013 --ignore DL3018 --ignore DL3016 {} \; > ${log_dir}/hadolint.log
 
 if [[ $(grep -c "error" ${log_dir}/hadolint.log) != 0 ]]; then
   $BOLD_RED && echo "Error!! Please Click on the artifact button to download and check error details." && $RESET
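DL3033 is hadolint's "specify version with yum install" rule; ignoring it is consistent with the new openEuler Dockerfiles installing distro packages without pinned versions. An illustrative Dockerfile line that DL3033 would otherwise flag:

RUN yum install -y python3 && yum clean all   # unpinned package version triggers DL3033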
11 changes: 11 additions & 0 deletions ChatQnA/Dockerfile.openEuler
@@ -0,0 +1,11 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler

COPY ./chatqna.py $HOME/chatqna.py
COPY ./entrypoint.sh $HOME/entrypoint.sh

ENTRYPOINT ["bash", "entrypoint.sh"]
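A usage sketch for building this image from the ChatQnA directory (the tag and repo values are illustrative and mirror the defaults of the ARGs above):

docker build --build-arg IMAGE_REPO=opea --build-arg BASE_TAG=latest \
  -f Dockerfile.openEuler -t opea/chatqna:latest-openeuler .

This presupposes that an opea/comps-base:latest-openeuler base image has already been built or pulled.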
184 changes: 184 additions & 0 deletions ChatQnA/docker_compose/intel/cpu/xeon/compose_openeuler.yaml
@@ -0,0 +1,184 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
  redis-vector-db:
    image: redis/redis-stack:7.2.0-v9
    container_name: redis-vector-db
    ports:
      - "6379:6379"
      - "8001:8001"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 10
  dataprep-redis-service:
    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler
    container_name: dataprep-redis-server
    depends_on:
      redis-vector-db:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
    ports:
      - "6007:5000"
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5000/v1/health_check || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 50
    restart: unless-stopped
  tei-embedding-service:
    image: openeuler/text-embeddings-inference-cpu:1.7.0-oe2403lts
    container_name: tei-embedding-server
    ports:
      - "6006:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
  retriever:
    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler
    container_name: retriever-redis-server
    depends_on:
      - redis-vector-db
    ports:
      - "7000:7000"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      REDIS_URL: redis://redis-vector-db:6379
      REDIS_HOST: redis-vector-db
      INDEX_NAME: ${INDEX_NAME}
      TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      LOGFLAG: ${LOGFLAG}
      RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS"
    restart: unless-stopped
  tei-reranking-service:
    image: openeuler/text-embeddings-inference-cpu:1.7.0-oe2403lts
    container_name: tei-reranking-server
    ports:
      - "8808:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/data"
    shm_size: 1g
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HUGGINGFACEHUB_API_TOKEN: ${HF_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${RERANK_MODEL_ID} --auto-truncate
  vllm-service:
    image: openeuler/vllm-cpu:0.9.1-oe2403lts
    container_name: vllm-service
    ports:
      - "9009:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 128g
    privileged: true
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HF_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
      VLLM_CPU_KVCACHE_SPACE: 30
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://$host_ip:9009/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80
  chatqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-backend-server
    depends_on:
      redis-vector-db:
        condition: service_started
      dataprep-redis-service:
        condition: service_healthy
      tei-embedding-service:
        condition: service_started
      retriever:
        condition: service_started
      tei-reranking-service:
        condition: service_started
      vllm-service:
        condition: service_healthy
    ports:
      - "8888:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=chatqna-xeon-backend-server
      - EMBEDDING_SERVER_HOST_IP=tei-embedding-service
      - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-80}
      - RETRIEVER_SERVICE_HOST_IP=retriever
      - RERANK_SERVER_HOST_IP=tei-reranking-service
      - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-80}
      - LLM_SERVER_HOST_IP=vllm-service
      - LLM_SERVER_PORT=80
      - LLM_MODEL=${LLM_MODEL_ID}
      - LOGFLAG=${LOGFLAG}
    ipc: host
    restart: always
  chatqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-ui-server
    depends_on:
      - chatqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
    ipc: host
    restart: always
  chatqna-xeon-nginx-server:
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
    container_name: chatqna-xeon-nginx-server
    depends_on:
      - chatqna-xeon-backend-server
      - chatqna-xeon-ui-server
    ports:
      - "${NGINX_PORT:-80}:80"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - FRONTEND_SERVICE_IP=chatqna-xeon-ui-server
      - FRONTEND_SERVICE_PORT=5173
      - BACKEND_SERVICE_NAME=chatqna
      - BACKEND_SERVICE_IP=chatqna-xeon-backend-server
      - BACKEND_SERVICE_PORT=8888
      - DATAPREP_SERVICE_IP=dataprep-redis-service
      - DATAPREP_SERVICE_PORT=5000
    ipc: host
    restart: always

networks:
  default:
    driver: bridge
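A usage sketch for bringing this stack up. The model IDs and index name are illustrative assumptions, and HF_TOKEN must be a valid Hugging Face token since the vLLM, embedding, and reranking services all read it:

cd ChatQnA/docker_compose/intel/cpu/xeon
export HF_TOKEN=<your-huggingface-token>                 # placeholder
export EMBEDDING_MODEL_ID=BAAI/bge-base-en-v1.5          # assumed model choices
export RERANK_MODEL_ID=BAAI/bge-reranker-base
export LLM_MODEL_ID=meta-llama/Meta-Llama-3-8B-Instruct
export INDEX_NAME=rag-redis
docker compose -f compose_openeuler.yaml up -d

Once vllm-service reports healthy, the backend should answer on port 8888 (the /v1/chatqna route is assumed from the stock ChatQnA deployment):

curl http://localhost:8888/v1/chatqna -H "Content-Type: application/json" \
  -d '{"messages": "What is OPEA?"}'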
54 changes: 54 additions & 0 deletions ChatQnA/docker_image_build/build.yaml
@@ -25,24 +25,60 @@ services:
       dockerfile: ./docker/Dockerfile.react
     extends: chatqna
     image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}
+  chatqna-openeuler:
+    build:
+      context: ../
+      dockerfile: ./Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}-openeuler
+  chatqna-ui-openeuler:
+    build:
+      context: ../ui
+      dockerfile: ./docker/Dockerfile.openEuler
+    extends: chatqna-ui
+    image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest}-openeuler
+  chatqna-conversation-ui-openeuler:
+    build:
+      context: ../ui
+      dockerfile: ./docker/Dockerfile.react.openEuler
+    extends: chatqna-conversation-ui
+    image: ${REGISTRY:-opea}/chatqna-conversation-ui:${TAG:-latest}-openeuler
   embedding:
     build:
       context: GenAIComps
       dockerfile: comps/embeddings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/embedding:${TAG:-latest}
+  embedding-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/embeddings/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/embedding:${TAG:-latest}-openeuler
   retriever:
     build:
       context: GenAIComps
       dockerfile: comps/retrievers/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/retriever:${TAG:-latest}
+  retriever-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/retrievers/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/retriever:${TAG:-latest}-openeuler
   reranking:
     build:
       context: GenAIComps
       dockerfile: comps/rerankings/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/reranking:${TAG:-latest}
+  reranking-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/rerankings/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/reranking:${TAG:-latest}-openeuler
   llm-textgen:
     build:
       context: GenAIComps
@@ -61,12 +97,24 @@ services:
       dockerfile: comps/dataprep/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}
+  dataprep-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/dataprep/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/dataprep:${TAG:-latest}-openeuler
   guardrails:
     build:
       context: GenAIComps
       dockerfile: comps/guardrails/src/guardrails/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/guardrails:${TAG:-latest}
+  guardrails-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/guardrails/src/guardrails/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/guardrails:${TAG:-latest}-openeuler
   vllm-rocm:
     build:
       context: GenAIComps
@@ -90,3 +138,9 @@ services:
       dockerfile: comps/third_parties/nginx/src/Dockerfile
     extends: chatqna
     image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
+  nginx-openeuler:
+    build:
+      context: GenAIComps
+      dockerfile: comps/third_parties/nginx/src/Dockerfile.openEuler
+    extends: chatqna
+    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}-openeuler
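A usage sketch for building the new variants. Cloning GenAIComps into this directory is an assumption based on the GenAIComps build contexts above:

cd ChatQnA/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git
docker compose -f build.yaml build chatqna-openeuler chatqna-ui-openeuler \
  embedding-openeuler retriever-openeuler reranking-openeuler \
  dataprep-openeuler guardrails-openeuler nginx-openeuler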