Commit 7ff66aa8 authored by Zhang Xiaoli's avatar Zhang Xiaoli
Browse files

remove redundant modules

parent 6dae7ea8
#!/bin/bash
# Task wrapper for the hstdm-l2 pipeline: run the pipeline on one message,
# collect logs into /work for the scalebox agent, and propagate the exit code.
# Arguments:
#   $1 - task message (key) passed to the pipeline
#   $2 - JSON task headers; may contain "dag_run_id"
message=$1
echo "message: $message"
cd /pipeline/output/ || exit 1
python /pipeline/app/run.py "$message"
exit_code=$?

# Collect the full pipeline log and the tail of the module log for the agent.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
echo "======module.log======" >> /work/custom-out.txt
tail -n 100 /pipeline/output/module.log >> /work/custom-out.txt

# Preserve per-stage timestamps when the pipeline produced them.
timefile=/pipeline/output/timestamp.txt
if [ -f "$timefile" ]; then
    echo "$timefile exist"
    mv "$timefile" /work/timestamps.txt
fi

# Clean the shared work area for the next task.
rm -rf /pipeline/input/* /pipeline/output/*

# Extract dag_run_id from the JSON headers (empty when absent).
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
    dag_run_id="${BASH_REMATCH[1]}"
    echo "dag_run_id: $dag_run_id"
else
    # no dag_run_id in json
    dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt

if [ "$exit_code" -eq 0 ]; then
    # scalebox task add --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
    # NOTE(review): WORK_DIR is not set in this script; presumably injected by
    # the agent environment (other paths here use /work directly) — confirm.
    echo "$message" > "${WORK_DIR}/messages.txt"
    echo "finish hstdm-l2."
else
    echo "finish hstdm-l2, exit_code: $exit_code"
fi
exit $exit_code
# Scalebox app Makefile for the hstdm-l2 pipeline on the 'csst' cluster.
CLUSTER=csst
# NOTE(review): 'all' depends on a 'build' target that is not defined in this
# file — confirm it is provided elsewhere or drop the dependency.
all: reset build
# Create the app from the zjs environment file.
run:
scalebox app create --env-file zjs.env
# Re-run make inside the cluster definition directory.
reset:
cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
down:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down
list:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Seed tasks for a scalebox job: insert one t_task row per message key into
# the scalebox database running in the 'database' container.
# Arguments:
#   $1 - numeric job id (required)
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi

for m in {50100000001..50100000002}; do
    echo "$m"
    # NOTE: job_id and m are interpolated into the SQL text; both are numeric
    # here, but do not feed untrusted input to this script.
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
# Scalebox app definition: terahertz (hstdm) level-2 processing pipeline.
name: hstdm-l2.app.process
label: 太赫兹
comment: 太赫兹二级流水线
cluster: csst
parameters:
initial_status: RUNNING
jobs:
# NOTE(review): job key is 'hstdm-l1' inside an l2 app — confirm the name.
hstdm-l1:
label: 太赫兹L2
base_image: cnic/csst-hstdm-l2
schedule_mode: HEAD
variables:
always_running: yes
reserved_on_exit: yes
# Cap on captured task output (bytes); tranc_mode TAIL presumably keeps the
# end of the output when truncating — confirm against scalebox docs.
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
parameters:
# start_message: 50100000001
# Split the message key into two groups: first 6 chars, then next 3.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# DFS / CRDS endpoints and credentials, substituted from the .env file.
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
- CRDS_SERVER_URL=${CRDS_SERVER_URL}
# Host directories mounted into the pipeline container.
paths:
- ${CSST_AUX_DIR}:/pipeline/aux
- ${CSST_DFS_ROOT}:/dfsroot
- ${CSST_CRDS_ROOT}:/crdsref
# command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
# sink_jobs:
# hosts:
# - h0:1
# - c0:1
# - c1:1
# - c2:1
# Pipeline host paths (goosefsx deployment).
AUX_DIR=/goosefsx/x_c60_o19xp6c1_proxy/L1Pipeline/aux
CRDS_DIR=/goosefsx/x_c60_o19xp6c1_proxy/L1Pipeline/aux/products_ref20_3hdr
DFS_ROOT=/goosefsx/x_c60_o19xp6c1_proxy/dfs_root
# DFS
# NOTE(review): the section below uses /share paths while the section above
# uses /goosefsx — this looks like two concatenated env files; confirm.
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/share/dfs
CSST_CRDS_ROOT=/share/crdsdata/data
CSST_AUX_DIR=/share/pipeline/aux
# Environment for the /sharewcl deployment: DFS/CRDS endpoints and mounts.
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/sharewcl/dfs
CSST_CRDS_ROOT=/sharewcl/crdsdata/data/references
CSST_AUX_DIR=/sharewcl/pipeline/aux
# Worker image: the csst pipeline image plus the scalebox agent and wrapper.
FROM csu-harbor.csst.nao:10443/csst/csst-ifs-l1-cube:latest
USER root
# Install the task wrapper script.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh
# Bring in the scalebox agent binaries.
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local
# One layer instead of three: create the agent work dir and hand both trees
# to the unprivileged pipeline user.
RUN mkdir /work && chown -R csst:csst /work /pipeline
WORKDIR /work/
USER csst
ENTRYPOINT ["goagent"]
# Build/publish Makefile for the csst-ifs-l1-cube worker image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-ifs-l1-cube
IMAGE_PATH:=/nfs/tmp/scalebox-images
all: build push dist
# Refresh the upstream base image, then build the agent image on top of it.
build:
docker pull csu-harbor.csst.nao:10443/csst/csst-ifs-l1-cube
docker build --network=host -t $(IMAGE_NAME) .
push:
docker push $(IMAGE_NAME)
# Pre-pull the image onto the worker nodes so task startup does not block.
dist:
ssh sc1 docker pull $(IMAGE_NAME)
ssh sc2 docker pull $(IMAGE_NAME)
ssh sc3 docker pull $(IMAGE_NAME)
run:
docker run -it --entrypoint bash $(IMAGE_NAME)
# NOTE(review): 'docker stop' expects a container name/ID; passing the image
# name only works if a container is actually named like it — confirm.
down:
docker stop $(IMAGE_NAME)
scp:
scp -r ./ csst-csu:/root/csst/csst-ifs-l1-cube/
\ No newline at end of file
# Admin-module image: pipeline base plus the run.sh wrapper and scalebox tools.
FROM csst/csst-ifs-l1-cube-adm:latest
LABEL maintainer="Xiaoli Zhang<zhangxiaoli@cnic.cn>"
USER root
# Install the task wrapper script.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh
# Helper binaries from the scalebox base image.
COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin /usr/local/sbin/
# Agent work dir; also make the wrapper reachable from interactive shells.
RUN mkdir -p /work && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc
#WORKDIR /work/
WORKDIR /L1Pipeline/workspace
ENTRYPOINT [ "goagent" ]
# Build/distribute Makefile for the csst-ifs-l1-cube-adm image.
IMAGE_NAME:=cnic/csst-ifs-l1-cube-adm
IMAGE_PATH:=/sharewcl/temp
build:
docker build --network=host -t $(IMAGE_NAME) .
# Distribute via a shared tarball: nodes load it instead of pulling a registry.
dist: build
#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/ifs-l1-cube-adm.tar
ssh c0 docker load -i $(IMAGE_PATH)/ifs-l1-cube-adm.tar
ssh c1 docker load -i $(IMAGE_PATH)/ifs-l1-cube-adm.tar
ssh c2 docker load -i $(IMAGE_PATH)/ifs-l1-cube-adm.tar
ssh n0 docker load -i $(IMAGE_PATH)/ifs-l1-cube-adm.tar
ssh n1 docker load -i $(IMAGE_PATH)/ifs-l1-cube-adm.tar
push:
docker push $(IMAGE_NAME)
run:
docker run -it --entrypoint bash $(IMAGE_NAME)
# NOTE(review): 'docker stop' takes a container name/ID, not an image name.
down:
docker stop $(IMAGE_NAME)
scp:
scp -r ./ csst-zjs:/root/csst/csst-ifs-l1-cube/adm/
save:
docker save $(IMAGE_NAME) > /sharewcl/temp/ifs-l1-cube-adm.tar
\ No newline at end of file
#!/bin/bash
# Admin module wrapper: run the cube-merge planning script and publish the
# confirmed obsid list as task messages for the sink job.
python l1_pipeline_script_ifs_cube_adm.py
exit_code=$?
mkdir -p /work
list=/L1Pipeline/workspace/list_output/cube_merge_obsid_list_confirm.txt
# Publish the message list only when the planner actually produced it, so a
# failed run does not spray mv errors.
if [ -f "$list" ]; then
    mv "$list" /work/messages.txt
    # Prefix every message with the sink job name expected by the agent.
    # '|' delimiter so a '/' in SINK_JOB cannot break the expression.
    sed -i "s|^|${SINK_JOB},|g" /work/messages.txt
fi
echo finish csst-ifs-l1-cube-adm.
exit $exit_code
# Exe-module image: pipeline base plus the run.sh wrapper and scalebox tools.
FROM csst/csst-ifs-l1-cube-exe:latest
USER root
# Install the task wrapper script.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh
# Helper binaries from the scalebox base image.
COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin/ /usr/local/sbin/
# Agent work dir; also make the wrapper reachable from interactive shells.
RUN mkdir -p /work && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc
WORKDIR /L1Pipeline/workspace
ENTRYPOINT [ "goagent" ]
# Build/distribute Makefile for the csst-ifs-l1-cube-exe image.
IMAGE_NAME:=cnic/csst-ifs-l1-cube-exe
IMAGE_PATH:=/sharewcl/temp
build:
docker build --network=host -t $(IMAGE_NAME) .
# Distribute via a shared tarball: nodes load it instead of pulling a registry.
dist: build
#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/ifs-l1-cube-exe.tar
ssh c0 docker load -i $(IMAGE_PATH)/ifs-l1-cube-exe.tar
ssh c1 docker load -i $(IMAGE_PATH)/ifs-l1-cube-exe.tar
ssh c2 docker load -i $(IMAGE_PATH)/ifs-l1-cube-exe.tar
ssh n0 docker load -i $(IMAGE_PATH)/ifs-l1-cube-exe.tar
ssh n1 docker load -i $(IMAGE_PATH)/ifs-l1-cube-exe.tar
push:
docker push $(IMAGE_NAME)
run:
docker run -it --entrypoint bash $(IMAGE_NAME)
# NOTE(review): 'docker stop' takes a container name/ID, not an image name.
down:
docker stop $(IMAGE_NAME)
scp:
scp -r ./ csst-zjs:/root/csst/ifs-l1-cube/exe/
save:
docker save $(IMAGE_NAME) > /sharewcl/temp/ifs-l1-cube-exe.tar
\ No newline at end of file
#!/bin/bash
# Exe module wrapper: process one observation message with the simdata script.
# Arguments:
#   $1 - task message (observation key)
# Quoted so the trailing '*' is printed literally instead of globbing the cwd.
echo "input:$1*"
python l1_pipeline_script_ifs_cube_simdata.py "$1"
exit_code=$?
mkdir -p /work
echo finish csst-ifs-l1-cube-exe.
exit $exit_code
#!/bin/bash
# Task wrapper for the ifs-l1-cube pipeline: run the pipeline on one message,
# collect logs into /work for the scalebox agent, and propagate the exit code.
# Arguments:
#   $1 - task message (key) passed to the pipeline
#   $2 - JSON task headers; may contain "dag_run_id"
message=$1
echo "message: $message"
cd /pipeline/output/ || exit 1
python /pipeline/app/run.py "$message"
exit_code=$?

# Collect the full pipeline log and the tail of the module log for the agent.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
echo "======module.log======" >> /work/custom-out.txt
tail -n 100 /pipeline/output/module.log >> /work/custom-out.txt

# Preserve per-stage timestamps when the pipeline produced them.
timefile=/pipeline/output/timestamp.txt
if [ -f "$timefile" ]; then
    echo "$timefile exist"
    mv "$timefile" /work/timestamps.txt
fi

# Clean the shared work area for the next task.
rm -rf /pipeline/input/* /pipeline/output/*

# Extract dag_run_id from the JSON headers (empty when absent).
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
    dag_run_id="${BASH_REMATCH[1]}"
    echo "dag_run_id: $dag_run_id"
else
    # no dag_run_id in json
    dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt

if [ "$exit_code" -eq 0 ]; then
    # scalebox task add --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
    echo "finish ifs-l1-cube."
else
    echo "finish ifs-l1-cube, exit_code: $exit_code"
fi
exit $exit_code
# Scalebox app Makefile for the ifs-l1-cube pipeline on the 'csst' cluster.
CLUSTER=csst
# NOTE(review): 'all' depends on a 'build' target that is not defined in this
# file — confirm it is provided elsewhere or drop the dependency.
all: reset build
# Create the app from the zjs environment file.
run:
scalebox app create --env-file zjs.env
# Re-run make inside the cluster definition directory.
reset:
cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
down:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down
list:
make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
scp:
scp -r ../ csst-zjs:/root/csst/csst-ifs-l1-cube/
\ No newline at end of file
# Scalebox app definition: IFS level-1 multi-exposure cube-merge pipeline.
# Two jobs: an admin (planning) module that emits messages, and an exe module
# that consumes them (wired via sink_jobs below).
name: ifs-l1-cube.app.process
label: 多次曝光合并
comment: 积分场光谱仪多次曝光合并一级流水线
cluster: csst
parameters:
initial_status: RUNNING
jobs:
csst-ifs-l1-cube-adm:
label: 多次曝光合并管理模块
base_image: cnic/csst-ifs-l1-cube-adm:latest
#schedule_mode: HEAD
variables:
# always_running: yes
output_text_size: 100000
# Retry DFS ingest errors (exit code 202) up to 3 times
retry_rules: "['202:3']"
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
parameters:
start_message: ANY
# DFS service endpoint and credentials (test values).
environments:
- CSST_DFS_API_MODE=cluster
- CSST_DFS_GATEWAY=172.24.27.2:30880
- CSST_DFS_APP_ID=test
- CSST_DFS_APP_TOKEN=test
paths: # change /share to /sharewcl in PML production environment
- ${AUX_DIR}:/L1Pipeline/aux
- ${CRDS_DIR}:/L1Pipeline/crds
- ${DFS_ROOT}:/L1Pipeline/dfs
# Messages produced by this job are delivered to the exe job.
sink_jobs:
- csst-ifs-l1-cube-exe
# command: >
# bash -c "python /pipeline/src/run.py && echo '---' && cat /pipeline/output/msg.txt"
hosts:
# - h0:1
# - c0:1
# - c1:1
# - c2:1
- n0:1
csst-ifs-l1-cube-exe:
label: 多次曝光合并执行模块
base_image: cnic/csst-ifs-l1-cube-exe:latest
# schedule_mode: HEAD
variables:
# always_running: yes
output_text_size: 100000
# Retry DFS ingest errors (exit code 202) up to 3 times
retry_rules: "['202:3']"
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
# DFS service endpoint and credentials (test values).
environments:
- CSST_DFS_API_MODE=cluster
- CSST_DFS_GATEWAY=172.24.27.2:30880
- CSST_DFS_APP_ID=test
- CSST_DFS_APP_TOKEN=test
paths: # change /share to /sharewcl in PML production environment
- ${AUX_DIR}:/L1Pipeline/aux
- ${CRDS_DIR}:/L1Pipeline/crds
- ${DFS_ROOT}:/L1Pipeline/dfs
# command: >
# python /pipeline/src/run.py {\"brick_id\":1}
hosts:
# - h0:1
# - c0:1
# - c1:1
# - c2:2
- n0:1
\ No newline at end of file
# Environment for the /share deployment: DFS/CRDS endpoints and host mounts.
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/share/dfs
CSST_CRDS_ROOT=/share/crdsdata/data
CSST_AUX_DIR=/share/pipeline/aux
# IFS
AUX_DIR=/share/shao/L1Pipeline/aux
CRDS_DIR=/share/shao/L1Pipeline/crds
DFS_ROOT=/share/dfs
\ No newline at end of file
# Environment for the /sharewcl (PML production) deployment.
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/sharewcl/dfs
CSST_CRDS_ROOT=/sharewcl/crdsdata/data
CSST_AUX_DIR=/sharewcl/pipeline/aux
# IFS
AUX_DIR=/sharewcl/shao/L1Pipeline/aux
CRDS_DIR=/sharewcl/shao/L1Pipeline/crds
DFS_ROOT=/sharewcl/dfs
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment