Commit 7ff66aa8 authored by Zhang Xiaoli's avatar Zhang Xiaoli
Browse files

remove redundant modules

parent 6dae7ea8
# Build/publish/distribute helpers for the csst-msc-l1-mbi-phot image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-msc-l1-mbi-phot
# Tag of the upstream base image to pull; override with `make TAG=...`.
TAG?=latest

# All targets are commands, not files — declare them phony so a stray
# file named e.g. `build` cannot silently break them.
.PHONY: all build push dist run down

all: build push dist

# Pull the upstream pipeline image, then build the wrapped image from it.
build:
	docker pull csu-harbor.csst.nao:10443/csst/csst-msc-l1-mbi-phot:$(TAG)
	docker build --network=host -t $(IMAGE_NAME) .

push:
	docker push $(IMAGE_NAME)

# Pre-pull the image on the worker hosts.
dist:
	ssh sc1 docker pull $(IMAGE_NAME)
	ssh sc2 docker pull $(IMAGE_NAME)
	ssh sc3 docker pull $(IMAGE_NAME)

# Interactive shell in the image for debugging.
run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# NOTE(review): `docker stop` expects a container name/ID, not an image
# reference — confirm this target actually stops the intended container.
down:
	docker stop $(IMAGE_NAME)
#!/bin/bash
# Task wrapper for the photmix pipeline step, invoked by the scalebox agent.
# $1: task message (key string or JSON); $2: message headers (JSON with escaped quotes).
# TRACE=yes  (temporary workaround)
rm -rf /work/*
message=$1
echo "message: "$message
# Strip backslash escapes so the headers parse as plain JSON.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"
# Does the header JSON carry a data_list field?
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# Extract the value of the data_list field.
data_list=$(echo "$headers" | jq -r '.data_list')
# Quote bare 24-char hex ids so the list becomes valid JSON.
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# Re-insert data_list back into the message.
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi
cd /pipeline/output/
# python /pipeline/app/run.py $message
run -p $message
exit_code=$?
# Collect the pipeline log for the agent's custom output.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
# echo "======module.log======" >> /work/custom-out.txt
# cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
# Preserve the timestamp file, if the pipeline produced one.
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi
# Pull dag_run_id out of the raw (still backslash-escaped) header string.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt
if [ $exit_code -eq 0 ]; then
echo finish photmix.
# scalebox task add $1
else
echo "finish photmix, exit_code: $exit_code"
fi
# Clean the pipeline work dirs for the next task; propagate the pipeline status.
rm -rf /pipeline/input/* /pipeline/output/*
exit $exit_code
# Scalebox application lifecycle helpers for the photmix app.
CLUSTER=csst-nao

.PHONY: all run reset down list

# NOTE(review): no `build` target is defined in this file — `make all`
# will fail unless an included makefile provides one; verify.
all: reset build

# Create the scalebox app from the cluster env file.
run:
	# PGHOST=192.168.25.27 GRPC_SERVER=192.168.25.27 scalebox app create --env-file csu.env
	PGPORT=9090 scalebox app create --env-file zj.env

# Recursive make uses $(MAKE) -C so -j/-n flags propagate; the old
# `cd … && make && cd -` form was equivalent but hid that.
reset:
	$(MAKE) -C ${HOME}/docker-scalebox/clusters/$(CLUSTER)

down:
	$(MAKE) -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down

list:
	$(MAKE) -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Bulk-insert "<obsid>-<detector>" task messages into the scalebox task table.
# $1: numeric job id the tasks are attached to.
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432
job_id=$1
# Input file: one "<obsid> <detector>" pair per line.
# (Renamed from `obsid`, which was previously also reused as the loop
# variable — confusing even though the redirection is opened only once.)
# msgfile="test.txt"
msgfile="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-mbi-all.txt"
while IFS= read -r line
do
	# Split on whitespace; the old `tr " " " "` was a no-op.
	read -r obsid detector <<< "$line"
	msg=$obsid-$detector
	echo "$msg"
	# NOTE(review): values are interpolated into SQL — acceptable only for
	# trusted input files; use psql -v/bind parameters otherwise.
	docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
		-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
done < "$msgfile"
~
# Scalebox application definition: standalone photmix processing app.
name: msc-l1-mbi-photmix.app.process
label: 多色成像测光流水线
comment: 主巡天多色成像测光流水线
cluster: csst-nao
parameters:
# Start the app immediately on creation.
initial_status: RUNNING
jobs:
msc-l1-mbi-photmix:
label: 多色成像测光
base_image: cnic/csst-msc-l1-mbi-photmix
# schedule_mode: HEAD
variables:
# always_running: yes
# reserved_on_exit: yes
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
parameters:
# start_message: 10160000068
# Split the 9-char message key into obsid (6) + detector (3) groups.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# Values are substituted from the env file passed to `scalebox app create`.
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
- CCDS_SERVER_URL=${CCDS_SERVER_URL}
# Host paths mounted into the job container.
paths:
- ${CSST_AUX_ROOT}:/pipeline/aux
- ${CSST_DFS_ROOT}:/dfs_root
- ${CCDS_ROOT}:/ccds_root
- ${CSST_AST_TEMP}:/pipeline/temp
# command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
# Host slots: <host>:<max parallel tasks>.
hosts:
- h0:5
- c0:10
# - c1:1
# - c2:1
# Environment for the NAO integration-test cluster (gateway 10.3.10.28).
# DFS
CSST_DFS_GATEWAY=10.3.10.28:30880
CSST_DFS_API_MODE=cluster
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# NOTE(review): quoted here but unquoted in the sibling env file —
# confirm the env-file parser treats both forms identically.
CSST_DFS_LOGS_DIR="."
# CCDS
CCDS_SERVER_URL=http://10.3.10.28:29000
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES
CSST_DFS_ROOT=/nfsdata/share/dfs/dfs_root # /dfs_root:ro
CCDS_ROOT=/nfsdata/share/dfs/ccds_root # /ccds_root:ro
CSST_AUX_ROOT=/nfsdata/share/pipeline-inttest/aux # /pipeline/aux:ro
CSST_AST_TEMP=/nfsdata/share/pipeline-inttest/ast_temp # /pipeline/temp:rw
# Environment for the ZJ-Lab cluster (gateway 172.24.232.11).
# DFS
CSST_DFS_GATEWAY=172.24.232.11:30880
CSST_DFS_API_MODE=cluster
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
CSST_DFS_LOGS_DIR=.
# CCDS
CCDS_SERVER_URL=http://172.24.232.11:29000
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES
# /dfs_root:ro
CSST_DFS_ROOT=/data/dfs/files
# /ccds_root:ro
CCDS_ROOT=/data/ccds/files
# /pipeline/temp:rw
CSST_AST_TEMP=/data/ast_temp
# HARBOR
HARBOR=zjlab-harbor.csst.nao
# TEST
CSST_INTTEST_ROOT=/data/pipeline-inttest
VERBOSE=true
# Wrap the upstream MBI pipeline image with the scalebox agent.
FROM csu-harbor.csst.nao:10443/csst/csst-msc-l1-mbi
USER root

# Install jq (run.sh uses it to manipulate JSON messages).
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Bring in the scalebox agent binary (goagent).
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# Work area owned by the unprivileged pipeline user; `-p` makes the
# build idempotent and the chowns are merged into a single layer.
RUN mkdir -p /work \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/
USER csst
ENTRYPOINT ["goagent"]
# Build/publish/distribute helpers for the csst-msc-l1-mbi image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-msc-l1-mbi
# Tag of the upstream base image to pull; override with `make TAG=...`.
TAG?=latest

# All targets are commands, not files — declare them phony.
.PHONY: all build push dist run down scp

all: build push dist

# Fixed: the build line was `-t $(IMAGE_NAME).` — the missing space glued
# the build-context dot onto the image tag, breaking `docker build`.
build:
	docker pull csu-harbor.csst.nao:10443/csst/csst-msc-l1-mbi:$(TAG)
	docker build --network=host -t $(IMAGE_NAME) .

push:
	docker push $(IMAGE_NAME)

# Pre-pull the image on the worker hosts.
dist:
	ssh sc1 docker pull $(IMAGE_NAME)
	ssh sc2 docker pull $(IMAGE_NAME)
	ssh sc3 docker pull $(IMAGE_NAME)

# Interactive shell in the image for debugging.
run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# NOTE(review): `docker stop` expects a container name/ID, not an image
# reference — confirm this target actually stops the intended container.
down:
	docker stop $(IMAGE_NAME)

# Copy this directory to the CSU build host.
scp:
	scp -r ./ csst-csu:/root/csst/csst-msc-l1-mbi/
#!/bin/bash
# Task wrapper for the MBI pipeline step, invoked by the scalebox agent.
# On success it forwards the task to the photmix sink job via `scalebox task add`.
# $1: task message (key string or JSON); $2: message headers (JSON with escaped quotes).
# TRACE=yes  (temporary workaround)
rm -rf /work/*
message=$1
echo "message: "$message
# Strip backslash escapes so the headers parse as plain JSON.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"
# Does the header JSON carry a data_list field?
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# Extract the value of the data_list field.
data_list=$(echo "$headers" | jq -r '.data_list')
# Quote bare 24-char hex ids so the list becomes valid JSON.
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# Re-insert data_list back into the message.
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi
cd /pipeline/output/
# python /pipeline/app/run.py $message
run -p $message
exit_code=$?
# Collect the pipeline log for the agent's custom output.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
# echo "======module.log======" >> /work/custom-out.txt
# cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
# Preserve the timestamp file, if the pipeline produced one.
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi
# Pull dag_run_id out of the raw (still backslash-escaped) header string.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt
# Pull sorted_tag the same way; default to 1 when absent.
pattern='"sorted_tag":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
sorted_tag="${BASH_REMATCH[1]}"
echo "sorted_tag: $sorted_tag"
else
# no sorted_tag in json
sorted_tag=1
fi
if [ $exit_code -eq 0 ]; then
if [ "$has_data_list" = "true" ]; then
# Extract the value of the data_list field (whitespace removed so it
# survives as a single --header argument).
strdatalist=$(echo "$fixed_data_list" | tr -d '[:space:]')
scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header data_list=${strdatalist} --header repeatable=yes --upsert $1
else
scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
fi
echo "finish mbi, start sink-job."
else
echo "finish mbi, exit_code: $exit_code"
fi
# Clean the pipeline work dirs for the next task; propagate the pipeline status.
rm -rf /pipeline/input/* /pipeline/output/*
exit $exit_code
# Scalebox application lifecycle helpers for the MBI app.
CLUSTER=csst-nao

.PHONY: all run reset down list

# NOTE(review): no `build` target is defined in this file — `make all`
# will fail unless an included makefile provides one; verify.
all: reset build

# Create the scalebox app from the cluster env file.
run:
	# PGHOST=192.168.25.27 GRPC_SERVER=192.168.25.27 scalebox app create --env-file csu.env
	PGPORT=9090 scalebox app create --env-file zj.env

# Recursive make uses $(MAKE) -C so -j/-n flags propagate; the old
# `cd … && make && cd -` form was equivalent but hid that.
reset:
	$(MAKE) -C ${HOME}/docker-scalebox/clusters/$(CLUSTER)

down:
	$(MAKE) -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down

list:
	$(MAKE) -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Bulk-insert "<obsid>-<detector>" task messages into the scalebox task table.
# $1: numeric job id the tasks are attached to.
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432
job_id=$1
# Input file: one "<obsid> <detector>" pair per line.
# (Renamed from `obsid`, which was previously also reused as the loop
# variable — confusing even though the redirection is opened only once.)
msgfile="mbi.txt"
# msgfile="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-mbi-all.txt"
while IFS= read -r line
do
	# Split on whitespace; the old `tr " " " "` was a no-op.
	read -r obsid detector <<< "$line"
	msg=$obsid-$detector
	echo "$msg"
	# NOTE(review): values are interpolated into SQL — acceptable only for
	# trusted input files; use psql -v/bind parameters otherwise.
	docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
		-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
done < "$msgfile"
~
# Scalebox application definition: MBI imaging job chained to the photmix
# sink job (see sink_jobs below).
name: msc-l1-mbi.app.process
label: 多色成像+测光
comment: 主巡天多色成像一级流水线
cluster: csst-nao
parameters:
# Start the app immediately on creation.
initial_status: RUNNING
jobs:
msc-l1-mbi:
label: 多色成像
base_image: cnic/csst-msc-l1-mbi
# schedule_mode: HEAD
variables:
# always_running: yes
# reserved_on_exit: yes
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
parameters:
# start_message: 10160000068
# Split the 9-char message key into obsid (6) + detector (3) groups.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# Values are substituted from the env file passed to `scalebox app create`.
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
- CCDS_SERVER_URL=${CCDS_SERVER_URL}
# Host paths mounted into the job container.
paths:
- ${CSST_AUX_ROOT}:/pipeline/aux
- ${CSST_DFS_ROOT}:/dfs_root
- ${CCDS_ROOT}:/ccds_root
- ${CSST_AST_TEMP}:/pipeline/temp
# command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
# Successful tasks are forwarded to the photmix job (run.sh calls
# `scalebox task add` on exit_code 0).
sink_jobs:
- msc-l1-mbi-photmix
hosts:
- h0:1
# - c0:20
# - c1:1
# - c2:1
msc-l1-mbi-photmix:
label: 多色成像测光
base_image: cnic/csst-msc-l1-mbi-photmix
# schedule_mode: HEAD
variables:
# always_running: yes
# reserved_on_exit: yes
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
parameters:
# start_message: 10160000068
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
- CCDS_SERVER_URL=${CCDS_SERVER_URL}
# Mounts here carry explicit ro/rw modes, unlike the msc-l1-mbi job above.
paths:
- ${CSST_AUX_ROOT}:/pipeline/aux:ro
- ${CSST_DFS_ROOT}:/dfs_root:ro
- ${CCDS_ROOT}:/ccds_root:ro
- ${CSST_AST_TEMP}:/pipeline/temp:rw
# command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
hosts:
- h0:1
# - c0:10
# - c1:1
# - c2:1
# Environment for the 192.168.25.x cluster.
# DFS
CSST_DFS_GATEWAY=192.168.25.89:28000
# NOTE(review): this file uses CSST_DFS_TOKEN while the sibling env files
# use CSST_DFS_APP_ID/CSST_DFS_APP_TOKEN — confirm which the pipeline reads.
CSST_DFS_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQ4ODU0NTA2NjQsInN1YiI6InN5c3RlbSJ9.POsuUABytu8-WMtZiYehiYEa5BnlgqNTXT6X3OTyix0
# CCDS
CCDS_SERVER_URL=http://192.168.25.89:29000
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES
CSST_DFS_ROOT=/nfs/dfs/dfs_root # /dfs_root:ro
CCDS_ROOT=/nfs/dfs/ccds_root # /ccds_root:ro
CSST_AST_TEMP=/nfs/pipeline-inttest/ast_temp # /pipeline/temp:rw
CSST_AUX_ROOT=/nfs/pipeline-inttest/aux # /pipeline/aux:ro
# TEST
CSST_INTTEST_ROOT=/nfs/pipeline-inttest
VERBOSE=true
\ No newline at end of file
# Environment for the NAO integration-test cluster (gateway 10.3.10.28).
# DFS
CSST_DFS_GATEWAY=10.3.10.28:30880
CSST_DFS_API_MODE=cluster
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# NOTE(review): quoted here but unquoted in the sibling env file —
# confirm the env-file parser treats both forms identically.
CSST_DFS_LOGS_DIR="."
# CCDS
CCDS_SERVER_URL=http://10.3.10.28:29000
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES
CSST_DFS_ROOT=/nfsdata/share/dfs/dfs_root # /dfs_root:ro
CCDS_ROOT=/nfsdata/share/dfs/ccds_root # /ccds_root:ro
CSST_AUX_ROOT=/nfsdata/share/pipeline-inttest/aux # /pipeline/aux:ro
CSST_AST_TEMP=/nfsdata/share/pipeline-inttest/ast_temp # /pipeline/temp:rw
# *.env: The env variables are used for running pipeline.
# TCC
# ENV_FILE=tcc.env
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=10.0.0.136:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
# NOTE(review): this file uses CRDS_* names where others use CCDS_* —
# confirm which naming the consuming pipeline expects.
CRDS_SERVER_URL=http://10.0.0.136:29000
# VOLUMES ==================
CSST_DFS_ROOT=/goosefsx/x-c70-y4s971cs-proxy/dfs
CSST_CRDS_ROOT=/goosefsx/x-c70-y4s971cs-proxy/crdsdata/data
CSST_AUX_DIR=/goosefsx/x-c70-y4s971cs-proxy/pipeline/aux
# Environment for the ZJ-Lab cluster (gateway 172.24.232.11).
# DFS
CSST_DFS_GATEWAY=172.24.232.11:30880
CSST_DFS_API_MODE=cluster
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
CSST_DFS_LOGS_DIR=.
# CCDS
CCDS_SERVER_URL=http://172.24.232.11:29000
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES
# /dfs_root:ro
CSST_DFS_ROOT=/data/dfs/files
# /ccds_root:ro
CCDS_ROOT=/data/ccds/files
# /pipeline/temp:rw
CSST_AST_TEMP=/data/ast_temp
# HARBOR
HARBOR=zjlab-harbor.csst.nao
# TEST
CSST_INTTEST_ROOT=/data/pipeline-inttest
VERBOSE=true
# Environment for the 172.24.27.2 cluster using the /share volume tree.
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/share/dfs
CSST_CRDS_ROOT=/share/crdsdata/data
CSST_AUX_DIR=/share/pipeline/aux
# Older path layouts kept for reference:
# AUX_DIR=/sharewcl/pipeline/aux
# CRDS_DIR=/sharewcl/OnOrbitCal/SimData/ref_202211/products_ref20_3hdr
# DFS_ROOT=/sharewcl/dfs
# AUX_DIR=/share/pipeline/aux
# CRDS_DIR=/share/OnOrbitCal/SimData/ref_202211/products_ref20_3hdr
# DFS_ROOT=/share/dfs
# Environment for the 172.24.27.2 cluster using the /sharewcl volume tree.
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/sharewcl/dfs
CSST_CRDS_ROOT=/sharewcl/crdsdata/data
CSST_AUX_DIR=/sharewcl/pipeline/aux
# Wrap the upstream OOC pipeline image with the scalebox agent.
FROM csu-harbor.csst.nao:10443/csst/csst-msc-l1-ooc
USER root

# Install jq (run.sh uses it to manipulate JSON messages).
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Bring in the scalebox agent binary (goagent).
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# Work area owned by the unprivileged pipeline user; `-p` makes the
# build idempotent and the chowns are merged into a single layer.
RUN mkdir -p /work \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/
USER csst
ENTRYPOINT ["goagent"]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment