Commit 7ff66aa8 authored by Zhang Xiaoli's avatar Zhang Xiaoli
Browse files

remove redundant modules

parent 6dae7ea8
# Base: upstream CSST IFS L1 QC0 pipeline image. TAG is overridable at
# build time (docker build --build-arg TAG=...) so the build matches the
# Makefile's `docker pull ...:$(TAG)`; defaults to latest for backward
# compatibility with the existing build command.
ARG TAG=latest
FROM csu-harbor.csst.nao:10443/csst/csst-ifs-l1-qc0:${TAG}

# Root is needed only for package installation and ownership fixes below.
USER root

# Install jq (run.sh uses it to parse JSON task headers); clean up apt
# metadata in the same layer so it does not bloat the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# Agent entry script.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Pull the scalebox agent binaries (goagent) from the agent image.
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# Create the agent work dir and hand /work and the pipeline tree to the
# csst user in a single layer instead of three separate ones.
RUN mkdir /work/ \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/

# Drop root for runtime.
USER csst
ENTRYPOINT ["goagent"]
# Build/publish Makefile for the csst-ifs-l1-qc0 scalebox wrapper image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-ifs-l1-qc0
# Tag of the upstream base image to pull before building; override with
# `make TAG=<tag>` (defaults to latest).
TAG?=latest
# Default: build the image, push it to the registry, then pre-pull on nodes.
all: build push dist
# Refresh the upstream base image, then build the wrapper on top of it.
build:
docker pull csu-harbor.csst.nao:10443/csst/csst-ifs-l1-qc0:$(TAG)
docker build --network=host -t $(IMAGE_NAME) .
push:
docker push $(IMAGE_NAME)
# Pre-distribute the freshly pushed image to the worker hosts sc1..sc3.
dist:
ssh sc1 docker pull $(IMAGE_NAME)
ssh sc2 docker pull $(IMAGE_NAME)
ssh sc3 docker pull $(IMAGE_NAME)
# Open an interactive shell inside the image for debugging.
run:
docker run -it --entrypoint bash $(IMAGE_NAME)
# NOTE(review): `docker stop` expects a container name/ID, not an image
# name — confirm this target actually stops the intended container.
down:
docker stop $(IMAGE_NAME)
# Copy this directory to the build host.
scp:
scp -r ./ csst-csu:/root/csst/csst-ifs-l1-qc0/
\ No newline at end of file
#!/bin/bash
# Scalebox agent entry script for the csst-ifs-l1-qc0 module.
#
# Arguments:
#   $1 - task key message
#   $2 - task headers as a (backslash-escaped) JSON string
#
# Runs the pipeline via `run -p`, archives its logs under /work, and on
# success forwards the task downstream with `scalebox task add`.
message=$1
echo "message: "$message
# Strip backslash escapes so the headers string becomes parseable JSON.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"
# If the headers carry a data_list, normalize it and merge it into message.
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# extract the value of the data_list field
data_list=$(echo "$headers" | jq -r '.data_list')
# quote bare 24-char lowercase-hex ids so the list is valid JSON
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# re-insert data_list back into message
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi
cd /pipeline/output/
# python /pipeline/app/run.py $message
run -p $message
exit_code=$?
# Collect the pipeline log for the scalebox task output.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
# echo "======module.log======" >> /work/custom-out.txt
# cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi
# Clean the pipeline work area for the next task.
rm -rf /pipeline/input/* /pipeline/output/*
# Re-read the raw headers and pull dag_run_id / sorted_tag out by regex.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt
pattern='"sorted_tag":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
sorted_tag="${BASH_REMATCH[1]}"
echo "sorted_tag: $sorted_tag"
else
# no sorted_tag in json
sorted_tag=1
fi
if [ $exit_code -eq 0 ]; then
# scalebox task add --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
if [ "$has_data_list" = "true" ]; then
# forward the normalized data_list (whitespace stripped) downstream
strdatalist=$(echo "$fixed_data_list" | tr -d '[:space:]')
scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header data_list=${strdatalist} --header repeatable=yes --upsert $1
else
scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
fi
echo "finish ifs-l1-qc0."
else
echo "finish ifs-l1-qc0, exit_code: $exit_code"
fi
exit $exit_code
# Base: upstream CSST IFS L1 RSS pipeline image. TAG is overridable at
# build time (docker build --build-arg TAG=...) so the build matches the
# Makefile's `docker pull ...:$(TAG)`; defaults to latest for backward
# compatibility with the existing build command.
ARG TAG=latest
FROM csu-harbor.csst.nao:10443/csst/csst-ifs-l1-rss:${TAG}

# Root is needed only for package installation and ownership fixes below.
USER root

# Install jq (run.sh uses it to parse JSON task headers); clean up apt
# metadata in the same layer so it does not bloat the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# Agent entry script.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Pull the scalebox agent binaries (goagent) from the agent image.
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# Create the agent work dir and hand /work and the pipeline tree to the
# csst user in a single layer instead of three separate ones.
RUN mkdir /work/ \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/

# Drop root for runtime.
USER csst
ENTRYPOINT ["goagent"]
# Build/publish Makefile for the csst-ifs-l1-rss scalebox wrapper image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-ifs-l1-rss
# Tag of the upstream base image to pull before building; override with
# `make TAG=<tag>` (defaults to latest).
TAG?=latest
# Default: build the image, push it to the registry, then pre-pull on nodes.
all: build push dist
# Refresh the upstream base image, then build the wrapper on top of it.
build:
docker pull csu-harbor.csst.nao:10443/csst/csst-ifs-l1-rss:$(TAG)
docker build --network=host -t $(IMAGE_NAME) .
push:
docker push $(IMAGE_NAME)
# Pre-distribute the freshly pushed image to the worker hosts sc1..sc3.
dist:
ssh sc1 docker pull $(IMAGE_NAME)
ssh sc2 docker pull $(IMAGE_NAME)
ssh sc3 docker pull $(IMAGE_NAME)
# Open an interactive shell inside the image for debugging.
run:
docker run -it --entrypoint bash $(IMAGE_NAME)
# NOTE(review): `docker stop` expects a container name/ID, not an image
# name — confirm this target actually stops the intended container.
down:
docker stop $(IMAGE_NAME)
# Copy this directory to the build host.
scp:
scp -r ./ csst-zjs:/root/csst/csst-ifs-l1-rss/
#!/bin/bash
# Scalebox agent entry script for the csst-ifs-l1-rss module.
#
# Arguments:
#   $1 - task key message
#   $2 - task headers as a (backslash-escaped) JSON string
#
# Runs the pipeline via `run -p` and archives its logs under /work.
# Unlike the qc0 script, no downstream task is submitted here (the
# `scalebox task add` call is commented out).
message=$1
echo "message: "$message
# Strip backslash escapes so the headers string becomes parseable JSON.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"
# If the headers carry a data_list, normalize it and merge it into message.
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# extract the value of the data_list field
data_list=$(echo "$headers" | jq -r '.data_list')
# quote bare 24-char lowercase-hex ids so the list is valid JSON
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# re-insert data_list back into message
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi
cd /pipeline/output/
# python /pipeline/app/run.py $message
run -p $message
exit_code=$?
# Collect the pipeline log for the scalebox task output.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
# echo "======module.log======" >> /work/custom-out.txt
# cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi
# Clean the pipeline work area for the next task.
rm -rf /pipeline/input/* /pipeline/output/*
# Re-read the raw headers and pull dag_run_id / sorted_tag out by regex.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
# NOTE(review): sorted_tag is extracted below but never used in this
# script (the forwarding call is commented out) — kept for parity with
# the sibling qc0 script.
pattern='"sorted_tag":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
sorted_tag="${BASH_REMATCH[1]}"
echo "sorted_tag: $sorted_tag"
else
# no sorted_tag in json
sorted_tag=1
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt
if [ $exit_code -eq 0 ]; then
# scalebox task add --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
echo "finish ifs-l1-rss."
else
echo "finish ifs-l1-rss, exit_code: $exit_code"
fi
exit $exit_code
# Deployment Makefile: (re)create the scalebox cluster and launch the app.
CLUSTER=csst
# NOTE(review): there is no `build` target defined in this file — `all`
# presumably relies on one defined elsewhere, otherwise it fails; confirm.
all: reset build
# Create the scalebox application from its env file.
run:
scalebox app create --env-file zjs.env
# Rebuild the cluster definition under docker-scalebox.
reset:
cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
down:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down
list:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Seed scalebox tasks for a job: insert one t_task row per observation id
# (key_message) into the scalebox database running in the "database"
# container.
#
# Usage: ./<script> <job_id>
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432
job_id=$1
# Fail fast if no job id was given: an empty value would otherwise be
# interpolated into the SQL and yield a confusing syntax error per row.
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi
#for m in {30100000001..30100000050}; do
for m in {30100000003..30100000050}; do
    echo "$m"
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
# Scalebox application definition for the IFS L1 RSS processing job.
# NOTE(review): indentation was lost in this scrape; the original YAML
# nests parameters/jobs/variables — restore indentation before use.
name: ifs-l1-rss.app.process
cluster: csst
parameters:
initial_status: RUNNING
jobs:
ifs-l1-rss:
base_image: cnic/csst-ifs-l1-rss
schedule_mode: HEAD
variables:
# Limit captured task output to 100000 chars, keeping the tail.
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
grpc_server: 172.24.23.6:50051
parameters:
# start_message: scalebox
# Split the 9-char task key into 6+3 groups (regex groups 1,2).
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# Host paths mounted into the pipeline container.
paths:
- ${AUX_DIR}:/L1Pipeline/aux
- ${CRDS_DIR}:/L1Pipeline/crds
- ${DFS_ROOT}:/L1Pipeline/dfs
- /sharewcl/shao/docker/csst_ifs_l1_rss/code:/L1Pipeline/workspace
# command: docker run -d --network=host %ENVS% %VOLUMES% {{IMAGE}}
# sink_jobs:
# hosts:
# - h0:1
# - c0:2
# - c1:2
# - c2:2
# Environment for the ifs-l1-rss scalebox app: host-side directories that
# are mounted into the pipeline container (referenced by the app yaml
# via ${AUX_DIR}/${CRDS_DIR}/${DFS_ROOT}).
# AUX_DIR=/sharewcl/shao/L1Pipeline/aux
# CRDS_DIR=/sharewcl/shao/L1Pipeline/crds
# DFS_ROOT=/sharewcl/shao/L1Pipeline/dfs
AUX_DIR=/sharewcl/shao/L1Pipeline/aux
CRDS_DIR=/sharewcl/shao/L1Pipeline/crds
DFS_ROOT=/sharewcl/dfs
# Alternate locations for the /share mount (currently disabled).
#AUX_DIR=/share/shao/L1Pipeline/aux
#CRDS_DIR=/share/shao/L1Pipeline/crds
#DFS_ROOT=/share/dfs
# Generic wrapper Dockerfile for CSST L1 pipeline modules; the base image
# is selected via build args (see the accompanying Makefile).
# NOTE: ARGs declared before FROM are only in scope for the FROM line.
ARG HARBOR_PATH
ARG MODULE_NAME
ARG TAG
FROM ${HARBOR_PATH}/csst/${MODULE_NAME}:${TAG}
# FROM csu-harbor.csst.nao:10443/csst/csst-cpic-l1:latest

# BUGFIX: re-declare the build args inside the stage. Docker resets ARG
# scope at FROM, so without these lines the ENV assignments at the bottom
# would expand to empty strings.
ARG HARBOR_PATH
ARG MODULE_NAME
ARG TAG

# Root is needed only for package installation and ownership fixes below.
USER root

# Install jq (run.sh uses it to parse JSON task headers); clean up apt
# metadata in the same layer so it does not bloat the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# Agent entry scripts.
COPY *.sh /app/bin/
RUN chmod +x /app/bin/*.sh

# Pull the scalebox agent binaries (goagent) from the agent image.
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# Create the agent work dir and hand /work and the pipeline tree to the
# csst user in a single layer instead of three separate ones.
RUN mkdir /work/ \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/

# Persist the build parameters into the runtime environment (run.sh logs
# use MODULE_NAME).
ENV HARBOR_PATH=${HARBOR_PATH}
ENV MODULE_NAME=${MODULE_NAME}
ENV TAG=${TAG}

# Drop root for runtime.
USER csst
ENTRYPOINT ["goagent"]
# Generic build/publish Makefile; module-specific settings come from
# config.env (MODULE_NAME, HARBOR_PATH, SINK_MODULE, optionally TAG).
-include config.env
$(info MODULE_NAME = $(MODULE_NAME))
$(info HARBOR_PATH = $(HARBOR_PATH))
$(info SINK_MODULE = $(SINK_MODULE))
# $(info TAG = $(TAG))
IMAGE_NAME:=$(HARBOR_PATH)/cnic/$(MODULE_NAME)
TAG?=latest
all: build push dist
# Refresh the upstream base image, then build the wrapper, forwarding
# the module/harbor/tag settings as Dockerfile build args.
build:
docker pull $(HARBOR_PATH)/csst/$(MODULE_NAME):$(TAG)
docker build --network=host \
--build-arg MODULE_NAME=$(MODULE_NAME) \
--build-arg HARBOR_PATH=$(HARBOR_PATH) \
--build-arg TAG=$(TAG) \
-t $(IMAGE_NAME) .
push:
docker push $(IMAGE_NAME)
# Pre-distribute the image to the worker hosts sc1..sc3.
dist:
ssh sc1 docker pull $(IMAGE_NAME)
ssh sc2 docker pull $(IMAGE_NAME)
ssh sc3 docker pull $(IMAGE_NAME)
# Interactive debugging shell with the same env vars the agent would see.
run:
docker run -it \
-e MODULE_NAME=${MODULE_NAME} \
-e HARBOR_PATH=${HARBOR_PATH} \
-e TAG=${TAG} \
-e SINK_MODULE=${SINK_MODULE} \
--entrypoint bash \
${IMAGE_NAME}
# NOTE(review): `docker stop` expects a container name/ID, not an image
# name — confirm this target actually stops the intended container.
down:
docker stop $(IMAGE_NAME)
\ No newline at end of file
# Build configuration consumed by the generic Makefile (-include config.env).
MODULE_NAME=csst-ifs-l1
HARBOR_PATH=csu-harbor.csst.nao:10443
# SINK_MODULE=1 makes run.sh forward finished tasks to a downstream job;
# 0 means this module is terminal.
SINK_MODULE=0
#!/bin/bash
# Generic scalebox agent entry script for CSST L1 pipeline modules.
#
# Arguments:
#   $1 - task key message
#   $2 - task headers as a (backslash-escaped) JSON string
#
# Runs the pipeline via `run -p`, archives its logs under /work, and on
# success — when SINK_MODULE=1 — forwards the task downstream via
# `scalebox task add`.
message=$1
echo "message: "$message
# Strip backslash escapes so the headers string becomes parseable JSON.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"
# If the headers carry a data_list, normalize it and merge it into message.
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# extract the value of the data_list field
data_list=$(echo "$headers" | jq -r '.data_list')
# quote bare 24-char lowercase-hex ids so the list is valid JSON
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# re-insert data_list back into message
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi
cd /pipeline/output/
run -p $message
exit_code=$?
# Collect the pipeline log for the scalebox task output.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi
# Clean the pipeline work area for the next task.
rm -rf /pipeline/input/* /pipeline/output/*
# Re-read the raw headers and pull dag_run_id / sorted_tag out by regex.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt
# BUGFIX: sorted_tag was referenced in the forwarding calls below but
# never extracted in this script; parse it here (defaulting to 1) exactly
# as the sibling qc0/rss scripts do.
pattern='"sorted_tag":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
sorted_tag="${BASH_REMATCH[1]}"
echo "sorted_tag: $sorted_tag"
else
# no sorted_tag in json
sorted_tag=1
fi
if [ $exit_code -eq 0 ]; then
# If a downstream module exists, hand the task to the sink job.
if [ "${SINK_MODULE:-0}" = "1" ]; then
# if [ "$SINK_MODULE" = "1" ]; then
if [ "$has_data_list" = "true" ]; then
# forward the normalized data_list (whitespace stripped) downstream
strdatalist=$(echo "$fixed_data_list" | tr -d '[:space:]')
scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header data_list=${strdatalist} --header repeatable=yes --upsert $1
else
scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
fi
echo "finish ${MODULE_NAME}, start sink-job."
else
# no downstream module: just finish
echo "finish ${MODULE_NAME}."
exit $exit_code
fi
else
echo "finish ${MODULE_NAME}, exit_code: $exit_code"
fi
exit $exit_code
# Base: upstream CSST MCI L1 pipeline image. TAG is overridable at build
# time (docker build --build-arg TAG=...) so the build matches the
# Makefile's `docker pull ...:$(TAG)`; defaults to latest for backward
# compatibility with the existing build command.
ARG TAG=latest
FROM csu-harbor.csst.nao:10443/csst/csst-mci-l1:${TAG}

# Root is needed only for package installation and ownership fixes below.
USER root

# Install jq (run.sh uses it to parse JSON task headers); clean up apt
# metadata in the same layer so it does not bloat the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# Agent entry script.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Pull the scalebox agent binaries (goagent) from the agent image.
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# Create the agent work dir and hand /work and the pipeline tree to the
# csst user in a single layer instead of three separate ones.
RUN mkdir /work/ \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/

# Drop root for runtime.
USER csst
ENTRYPOINT ["goagent"]
# Build/publish Makefile for the csst-mci-l1 scalebox wrapper image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-mci-l1
# Tag of the upstream base image to pull before building; override with
# `make TAG=<tag>` (defaults to latest).
TAG?=latest
# Default: build the image, push it to the registry, then pre-pull on nodes.
all: build push dist
# Refresh the upstream base image, then build the wrapper on top of it.
build:
docker pull csu-harbor.csst.nao:10443/csst/csst-mci-l1:$(TAG)
docker build --network=host -t $(IMAGE_NAME) .
push:
docker push $(IMAGE_NAME)
# Pre-distribute the freshly pushed image to the worker hosts sc1..sc3.
dist:
ssh sc1 docker pull $(IMAGE_NAME)
ssh sc2 docker pull $(IMAGE_NAME)
ssh sc3 docker pull $(IMAGE_NAME)
# Open an interactive shell inside the image for debugging.
run:
docker run -it --entrypoint bash $(IMAGE_NAME)
# NOTE(review): `docker stop` expects a container name/ID, not an image
# name — confirm this target actually stops the intended container.
down:
docker stop $(IMAGE_NAME)
#!/bin/bash
# Scalebox agent entry script for the csst-mci-l1 module.
#
# Arguments:
#   $1 - task key message
#   $2 - task headers as a (backslash-escaped) JSON string
#
# Runs the pipeline via `run -p` and archives its logs under /work.
# No downstream task is submitted (the `scalebox task add` call is
# commented out).
message=$1
echo "message: "$message
# Strip backslash escapes so the headers string becomes parseable JSON.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"
# If the headers carry a data_list, normalize it and merge it into message.
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# extract the value of the data_list field
data_list=$(echo "$headers" | jq -r '.data_list')
# quote bare 24-char lowercase-hex ids so the list is valid JSON
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# re-insert data_list back into message
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi
cd /pipeline/output/
# python /pipeline/app/run.py $message
# python l1_pipeline_script_ifs_rss_simdata.py $obsid
run -p $message
exit_code=$?
# Collect the pipeline log for the scalebox task output.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
# echo "======module.log======" >> /work/custom-out.txt
# cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi
# Clean the pipeline work area for the next task.
rm -rf /pipeline/input/* /pipeline/output/*
# Re-read the raw headers and pull dag_run_id out by regex.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt
if [ $exit_code -eq 0 ]; then
# scalebox task add --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
echo "finish mci."
else
echo "finish mci, exit_code: $exit_code"
fi
exit $exit_code
# python /L1Pipeline/workspace/l1_pipeline_script_mci_shao_simdata.py $obsid
# Deployment Makefile: (re)create the scalebox cluster and launch the app.
CLUSTER=csst
# NOTE(review): there is no `build` target defined in this file — `all`
# presumably relies on one defined elsewhere, otherwise it fails; confirm.
all: reset build
# Create the scalebox application from its env file.
run:
scalebox app create --env-file zjs.env
# Rebuild the cluster definition under docker-scalebox.
reset:
cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
down:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down
list:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Seed scalebox tasks for a job: insert one t_task row per observation id
# (key_message) into the scalebox database running in the "database"
# container.
#
# Usage: ./<script> <job_id>
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432
job_id=$1
# Fail fast if no job id was given: an empty value would otherwise be
# interpolated into the SQL and yield a confusing syntax error per row.
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi
#for m in {20100000001..20100000005}; do
for m in {20100000002..20100000005}; do
    echo "$m"
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
# Scalebox application definition for the MCI L1 processing job.
# NOTE(review): indentation was lost in this scrape; the original YAML
# nests parameters/jobs/variables — restore indentation before use.
name: mci-l1.app.process
cluster: csst
parameters:
initial_status: RUNNING
jobs:
mci-l1:
base_image: cnic/csst-mci-l1
# schedule_mode: HEAD
variables:
# Limit captured task output to 100000 chars, keeping the tail.
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
parameters:
# Split the 9-char task key into 6+3 groups (regex groups 1,2).
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# start_message: 20100000001
# Host paths mounted into the pipeline container.
paths:
- ${AUX_DIR}:/L1Pipeline/aux
- ${CRDS_DIR}:/L1Pipeline/crds
- ${DFS_ROOT}:/L1Pipeline/dfs
- /sharewcl/shao/docker/csst_mci_l1/code:/L1Pipeline/workspace
# paths:
# - ${AUX_DIR}:/L1Pipeline/aux
# - ${CRDS_DIR}:/L1Pipeline/aux/C6.1_ref_crds
# - ${DFS_ROOT}:/dfsroot
# command: docker run -d --network=host %ENVS% %VOLUMES% {{IMAGE}}
# sink_jobs:
# Worker placement: one agent instance on host n0.
hosts:
# - h0:1
# - c0:2
# - c1:2
# - c2:2
- n0:1
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment