Commit 7ff66aa8 authored by Zhang Xiaoli's avatar Zhang Xiaoli
Browse files

remove redundant modules

parent 6dae7ea8
# Agent image for the zero-exposure SLS extraction pipeline.
# NOTE(review): FROM is untagged (implicit :latest) — pin a tag/digest for reproducible builds.
FROM csu-harbor.csst.nao:10443/csst/csst-msc-l1-sls-extraction-zero
USER root

# Agent entry script invoked by goagent for each task.
COPY run.sh /app/bin/

# scalebox agent binaries (provides goagent).
COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin /usr/local/sbin

# One layer instead of four: mark the entry script executable, create the
# per-task work dir, and hand both trees to the unprivileged csst user.
RUN chmod +x /app/bin/run.sh \
 && mkdir /work \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/
# Drop root before the entrypoint.
USER csst
ENTRYPOINT ["goagent"]
# Build/publish the zero-exposure SLS extraction agent image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-msc-l1-sls-extraction-zero
# NOTE(review): IMAGE_PATH is unused here (no save target) — kept for consistency with sibling Makefiles.
IMAGE_PATH:=/nfs/tmp/scalebox-images

all: build push dist

build:
	docker pull csu-harbor.csst.nao:10443/csst/csst-msc-l1-sls-extraction-zero
	docker build --network=host -t $(IMAGE_NAME) .

push:
	docker push $(IMAGE_NAME)

# Pre-pull the image onto each worker node.
dist:
	ssh sc1 docker pull $(IMAGE_NAME)
	ssh sc2 docker pull $(IMAGE_NAME)
	ssh sc3 docker pull $(IMAGE_NAME)

run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# Fix: `docker stop` takes container IDs/names, not an image name — stop every
# container created from this image instead (no-op when none are running).
down:
	-docker ps -q --filter ancestor=$(IMAGE_NAME) | xargs -r docker stop
#!/bin/bash
# Agent entry for the zero-exposure SLS extraction job.
#   $1 = task key message (formatted "<obsid>-<detector>", see add-tasks.sh)
#   $2 = JSON headers from the scheduler (may carry dag_run_id)
#TRACE=yes  (temporary workaround)

# Clean the per-task scratch dir.
rm -rf /work/*

message=$1
echo "message: $message"

# Fix: obsid/detector were passed to run.py but never assigned in this script.
# Derive them from the message unless the agent already exported them into the
# environment (":-" keeps any pre-set values untouched).
obsid=${obsid:-${message%-*}}
detector=${detector:-${message##*-}}

cd /pipeline/output/
python /pipeline/app/run.py $obsid $detector
exit_code=$?

# Collect logs for the scheduler UI; keep only the tail of the module log.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
echo "======module.log======" >> /work/custom-out.txt
cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt

# Export step timestamps when the pipeline produced them.
timefile=/pipeline/output/timestamp.txt
if test -f "$timefile"; then
    echo "$timefile exist"
    mv /pipeline/output/timestamp.txt /work/timestamps.txt
fi

# Extract the Airflow dag_run_id (if present in the headers JSON) so it gets
# attached to the task record via extra-attributes.txt.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
    dag_run_id="${BASH_REMATCH[1]}"
    echo "dag_run_id: $dag_run_id"
else
    # no dag_run_id in json
    dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt

if [ $exit_code -eq 0 ]; then
    echo finish sls-extraction-zero.
    # scalebox task add $1
else
    echo "finish sls-extraction-zero, exit_code: $exit_code"
fi

# Leave input/output clean for the next task on this slot.
rm -rf /pipeline/input/* /pipeline/output/*
exit $exit_code
# Deploy the sls-extraction-zero scalebox app on the csst-nao cluster.
CLUSTER=csst-nao

# NOTE(review): no `build` target is defined in this Makefile — confirm it is
# provided by an include or drop it from `all`.
all: reset build

run:
# PGHOST=192.168.25.27 GRPC_SERVER=192.168.25.27 scalebox app create --env-file csu.env
	PGPORT=9090 scalebox app create --env-file nao.env

# Fix: use `make -C` like down/list; the old `cd … && make && cd -` form ran in
# a throwaway shell, so the trailing `cd -` had no effect anyway.
reset:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER)

down:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down

list:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Bulk-insert task key messages into scalebox's t_task table.
#   $1 = job id; input file holds one "<obsid> <detector>" pair per line.
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
# Fix: the input-file path was stored in `obsid`, which the loop body then
# reassigned — it only worked because `< file` is opened once. Use a distinct name.
# msgfile="test.txt"
msgfile="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-sls-all.txt"

while IFS= read -r line
do
    # Split "<obsid> <detector>" on whitespace (the old `tr " " " "` was a no-op).
    arr=($line)
    obsid="${arr[0]}"
    detector="${arr[1]}"
    msg=$obsid-$detector
    echo "$msg"
    docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT} \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
done < "$msgfile"
~
# Scalebox app definition: zero-exposure SLS extraction pipeline.
# NOTE(review): the original YAML nesting indentation appears lost in this
# paste — restore it against the repo copy before use.
name: msc-l1-sls-extraction-zero.app.process
label: 无缝光谱抽谱流水线
comment: 主巡天无缝光谱抽谱流水线
cluster: csst-nao
parameters:
initial_status: RUNNING
jobs:
msc-l1-sls-extraction-zero:
label: 无缝光谱抽谱
base_image: cnic/csst-msc-l1-sls-extraction-zero
# schedule_mode: HEAD
variables:
# always_running: yes
# reserved_on_exit: yes
# Cap captured task output at 100000 bytes; TAIL mode presumably keeps the
# end of the log — confirm against scalebox docs. ("tranc" is the key the
# agent reads, see TEXT_TRANC_MODE in the docker-run notes; do not "fix" it.)
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
parameters:
# start_message: 10160000068
# Split the 9-char task key into groups (presumably obsid-suffix + detector) — verify.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# DFS/CCDS credentials and endpoints forwarded from the env file.
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
- CCDS_SERVER_URL=${CCDS_SERVER_URL}
# Host bind mounts (host path from env file : container path : mode).
paths:
- ${CSST_AUX_ROOT}:/pipeline/aux:ro
- ${CSST_DFS_ROOT}:/dfs_root:ro
- ${CCDS_ROOT}:/ccds_root:ro
- ${CSST_AST_TEMP}:/pipeline/temp:rw
# command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
# Per-host task slot counts.
hosts:
- h0:5
- c0:10
# - c1:1
# - c2:1
# Runtime env for the sls-extraction-zero app on the csst-nao cluster.
# DFS
CSST_DFS_GATEWAY=10.3.10.28:30880
CSST_DFS_API_MODE=cluster
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
CSST_DFS_LOGS_DIR="."
# CCDS
CCDS_SERVER_URL=http://10.3.10.28:29000
# NOTE(review): placeholder-looking credentials — confirm real values are injected elsewhere.
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES (host side of the bind mounts declared in the app YAML)
CSST_DFS_ROOT=/nfsdata/share/dfs/dfs_root # /dfs_root:ro
CCDS_ROOT=/nfsdata/share/dfs/ccds_root # /ccds_root:ro
CSST_AUX_ROOT=/nfsdata/share/pipeline-inttest/aux # /pipeline/aux:ro
CSST_AST_TEMP=/nfsdata/share/pipeline-inttest/ast_temp # /pipeline/temp:rw
# Agent image for the main SLS (slitless spectroscopy) L1 pipeline.
# NOTE(review): FROM is untagged (implicit :latest) — pin a tag/digest for reproducible builds.
FROM csu-harbor.csst.nao:10443/csst/csst-msc-l1-sls
USER root

# Install jq (run.sh uses it to parse JSON headers); combine update+install and
# clean apt lists in the same layer so they never land in the image.
RUN apt-get update \
 && apt-get install -y --no-install-recommends jq \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# Agent entry script invoked by goagent for each task.
COPY run.sh /app/bin/

# scalebox agent tree (provides goagent).
COPY --from=hub.cstcloud.cn/scalebox/agent /usr/local /usr/local

# One layer instead of four: mark the entry script executable, create the
# per-task work dir, and hand both trees to the unprivileged csst user.
RUN chmod +x /app/bin/run.sh \
 && mkdir /work \
 && chown -R csst:csst /work /pipeline

WORKDIR /work/
# Drop root before the entrypoint.
USER csst
ENTRYPOINT ["goagent"]
# Build/publish the SLS agent image.
IMAGE_NAME:=csu-harbor.csst.nao:10443/cnic/csst-msc-l1-sls
TAG?=latest
# Fix: IMAGE_PATH was undefined in this Makefile, so `make save` wrote to
# "/msc-l1-sls.tar" at the filesystem root. Default matches the sibling Makefile.
IMAGE_PATH?=/nfs/tmp/scalebox-images

all: build push dist

# NOTE(review): the Dockerfile's FROM line is untagged (:latest); pulling a
# non-default $(TAG) here is NOT what gets built — align the two.
build:
	docker pull csu-harbor.csst.nao:10443/csst/csst-msc-l1-sls:$(TAG)
	docker build --network=host -t $(IMAGE_NAME) .

push:
	docker push $(IMAGE_NAME)

# Pre-pull the image onto each worker node.
dist:
	ssh sc1 docker pull $(IMAGE_NAME)
	ssh sc2 docker pull $(IMAGE_NAME)
	ssh sc3 docker pull $(IMAGE_NAME)

run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# Fix: `docker stop` takes container IDs/names, not an image name — stop every
# container created from this image instead (no-op when none are running).
down:
	-docker ps -q --filter ancestor=$(IMAGE_NAME) | xargs -r docker stop

scp:
	scp -r ./ tencent-p0:/root/csst/csst-msc-l1-sls/

save:
	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls.tar
# 在beta进行单核测试
## cpu
--cpuset-cpus="1"
- taskid 48060
- messagetxt 10609200000664-10
--cpuset-cpus="2"
- taskid 48097
- messagetxt 10609200000512-01
docker run -d --network=host --rm -e JOB_NAME=msc-l1-sls -e MAX_SLEEP_COUNT=20 -e TEXT_TRANC_MODE=TAIL -e LOCAL_IP_INDEX=1 -e SINK_JOB=msc-l1-sls-extraction-zero -e CSST_DFS_APP_TOKEN=test -e CCDS_SERVER_URL=http://10.3.10.28:29000 -e CLUSTER=csst-nao -e JOB_ID=79 -e GRPC_SERVER=10.3.10.28 -e OUTPUT_TEXT_SIZE=100000 -e CSST_DFS_API_MODE=cluster -e CSST_DFS_GATEWAY=10.3.10.28:30880 -e CSST_DFS_APP_ID=test -v /nfsdata/share/pipeline-inttest/aux:/pipeline/aux:ro -v /nfsdata/share/dfs/dfs_root:/dfs_root:ro -v /nfsdata/share/dfs/ccds_root:/ccds_root:ro -v /nfsdata/share/pipeline-inttest/ast_temp:/pipeline/temp:rw -v /etc/localtime:/etc/localtime:ro -v /:/local -v /share/scalebox/mydata/:/data cnic/csst-msc-l1-sls
\ No newline at end of file
#!/bin/bash
# Agent entry for the SLS L1 pipeline job.
#   $1 = task key message, $2 = JSON headers from the scheduler.
#TRACE=yes  (temporary workaround)

# Clean the per-task scratch dir.
rm -rf /work/*
message=$1
echo "message: "$message

# Strip escaping backslashes from the raw headers so jq can parse them.
headers=$(echo "$2" | sed 's/\\//g')
echo "$headers" | jq '.'
echo "headers: $headers"

# If the headers carry a data_list, splice it into the message.
has_data_list=$(echo "$headers" | jq 'has("data_list")')
if [ "$has_data_list" = "true" ]; then
# Extract the value of the data_list field.
data_list=$(echo "$headers" | jq -r '.data_list')
# Quote bare 24-char hex ids so the list is valid JSON for --argjson.
fixed_data_list=$(echo "$data_list" | sed 's/\([a-f0-9]\{24\}\)/"\1"/g')
# Re-insert data_list back into message.
# NOTE(review): this pipes $message through jq, which assumes $1 is itself a
# JSON document — confirm the upstream message format.
message=$(echo "$message" | jq --argjson dl "$fixed_data_list" '. + {data_list: $dl}')
message=$(echo "$message" | jq -c '.')
fi

cd /pipeline/output/
# python /pipeline/app/run.py $message
# `run` is the pipeline image's own entry command.
run -p $message
exit_code=$?

# Collect the pipeline log for the scheduler UI.
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
# echo "======module.log======" >> /work/custom-out.txt
# cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt

# Export step timestamps when the pipeline produced them.
timefile=/pipeline/output/timestamps.txt
if test -f "$timefile"; then
echo "$timefile exist"
mv /pipeline/output/timestamps.txt /work/timestamps.txt
fi

# Extract the Airflow dag_run_id (if present in the raw headers) and attach it
# to the task record via extra-attributes.txt.
headers=$2
pattern='"dag_run_id":"([^"]+)"'
if [[ $headers =~ $pattern ]]; then
dag_run_id="${BASH_REMATCH[1]}"
echo "dag_run_id: $dag_run_id"
else
# no dag_run_id in json
dag_run_id=""
fi
echo "dag_run_id:$dag_run_id" >> /work/extra-attributes.txt

# pattern='"sorted_tag":"([^"]+)"'
# if [[ $headers =~ $pattern ]]; then
# sorted_tag="${BASH_REMATCH[1]}"
# echo "sorted_tag: $sorted_tag"
# else
# # no sorted_tag in json
# sorted_tag=1
# fi

if [ $exit_code -eq 0 ]; then
echo "finish sls."
# scalebox task add --header sorted_tag=${sorted_tag} --header dag_run_id=${dag_run_id} --header repeatable=yes --upsert ${message}
else
echo "finish sls, exit_code: $exit_code"
fi

# Leave input/output clean for the next task on this slot.
rm -rf /pipeline/input/* /pipeline/output/*
exit $exit_code
# Deploy the SLS scalebox app on the csst-nao cluster.
CLUSTER=csst-nao

# all: reset build

run:
	PGPORT=9090 scalebox app create --env-file nao.env

# Fix: use `make -C` like down/list; the old `cd … && make && cd -` form ran in
# a throwaway shell, so the trailing `cd -` had no effect anyway.
reset:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER)

down:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down

list:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Bulk-insert task key messages into scalebox's t_task table.
#   $1 = job id; input file holds one "<obsid> <detector>" pair per line.
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
# Fix: the input-file path was stored in `obsid`, which the loop body then
# reassigned — it only worked because `< file` is opened once. Use a distinct name.
msgfile="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-sls-all.txt"

while IFS= read -r line
do
    # Split "<obsid> <detector>" on whitespace (the old `tr " " " "` was a no-op).
    arr=($line)
    obsid="${arr[0]}"
    detector="${arr[1]}"
    msg=$obsid-$detector
    echo "$msg"
    docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT} \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
done < "$msgfile"
~
# Scalebox app definition: main SLS L1 pipeline.
# NOTE(review): the original YAML nesting indentation appears lost in this
# paste — restore it against the repo copy before use.
name: msc-l1-sls.app.process
label: 无缝光谱
comment: 主巡天无缝光谱一级流水线
cluster: csst-nao
parameters:
initial_status: RUNNING
jobs:
msc-l1-sls:
label: 无缝光谱
base_image: cnic/csst-msc-l1-sls
# schedule_mode: HEAD
variables:
# always_running: yes
# reserved_on_exit: yes
# Retry DFS ingest errors 3 times (disabled).
# retry_rules: "['202:3']"
# Cap captured task output at 100000 bytes; TAIL mode presumably keeps the
# end of the log — confirm against scalebox docs.
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
# grpc_server: 172.24.23.6:50051
parameters:
# Split the 9-char task key into groups (presumably obsid-suffix + detector) — verify.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# DFS/CCDS credentials and endpoints forwarded from the env file.
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
- CCDS_SERVER_URL=${CCDS_SERVER_URL}
# Host bind mounts (host path from env file : container path : mode).
paths:
- ${CSST_AUX_ROOT}:/pipeline/aux:ro
- ${CSST_DFS_ROOT}:/dfs_root:ro
- ${CCDS_ROOT}:/ccds_root:ro
- ${CSST_AST_TEMP}:/pipeline/temp:rw
# sink_jobs:
# Per-host task slot counts.
hosts:
- h0:10
- c0:20
# - c1:3
# - c2:3
# Runtime env for the SLS app on the csst-nao cluster.
# DFS
CSST_DFS_GATEWAY=10.3.10.28:30880
CSST_DFS_API_MODE=cluster
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
CSST_DFS_LOGS_DIR="."
# CCDS
CCDS_SERVER_URL=http://10.3.10.28:29000
# NOTE(review): placeholder-looking credentials — confirm real values are injected elsewhere.
CCDS_USER=USER
CCDS_PASS=PASS
# VOLUMES (host side of the bind mounts declared in the app YAML)
CSST_DFS_ROOT=/nfsdata/share/dfs/dfs_root # /dfs_root:ro
CCDS_ROOT=/nfsdata/share/dfs/ccds_root # /ccds_root:ro
CSST_AUX_ROOT=/nfsdata/share/pipeline-inttest/aux # /pipeline/aux:ro
CSST_AST_TEMP=/nfsdata/share/pipeline-inttest/ast_temp # /pipeline/temp:rw
# *.env: The env variables are used for running pipeline.
# TCC
# ENV_FILE=tcc.env
# NOTE(review): three env profiles appear concatenated below with duplicate
# keys — only one profile should live in each env file.
# --- profile 1: goosefsx storage (10.0.0.136 endpoints) ---
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=10.0.0.136:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://10.0.0.136:29000
# VOLUMES ==================
CSST_DFS_ROOT=/goosefsx/x-c70-y4s971cs-proxy/dfs
CSST_CRDS_ROOT=/goosefsx/x-c70-y4s971cs-proxy/crdsdata/data
CSST_AUX_DIR=/goosefsx/x-c70-y4s971cs-proxy/pipeline/aux
# --- profile 2: /share storage (172.24.27.2 endpoints) ---
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/share/dfs
CSST_CRDS_ROOT=/share/crdsdata/data
CSST_AUX_DIR=/share/pipeline/aux
# --- profile 3: /sharewcl storage (172.24.27.2 endpoints) ---
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/sharewcl/dfs
CSST_CRDS_ROOT=/sharewcl/crdsdata/data
CSST_AUX_DIR=/sharewcl/pipeline/aux
# Admin/agent image for the SLS 1D pipeline.
# NOTE(review): :latest is not reproducible — pin a version tag or digest.
FROM csst/csst-msc-l1-sls1d:latest
USER root

# Agent entry script invoked by goagent for each task.
COPY run.sh /app/bin/

# scalebox agent binaries (provides goagent).
COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin/ /usr/local/sbin/

# One layer instead of two: mark the entry script executable, create /work and
# put /app/bin on root's interactive PATH.
RUN chmod +x /app/bin/run.sh \
 && mkdir -p /work \
 && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc
# RUN mkdir -p /work /app/bin && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc

WORKDIR /work
# NOTE(review): no USER directive — the container runs as root; confirm whether
# the base image provides an unprivileged user to switch to.
ENTRYPOINT [ "goagent" ]
# Build/distribute the SLS 1D admin agent image.
IMAGE_NAME:=cnic/csst-msc-l1-sls1d-adm
IMAGE_PATH:=/goosefsx/x-c70-y4s971cs-proxy/temp

build:
	docker build --network=host -t $(IMAGE_NAME) .

# Ship the freshly built image to node c0 via a tar on shared storage.
dist: build
#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls1d-adm.tar
	ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-sls1d-adm.tar

push:
	docker push $(IMAGE_NAME)

run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# Fix: `docker stop` takes container IDs/names, not an image name — stop every
# container created from this image instead (no-op when none are running).
down:
	-docker ps -q --filter ancestor=$(IMAGE_NAME) | xargs -r docker stop

scp:
	scp -r ./ tencent-p0:/root/csst/csst-msc-l1-sls1d/adm/

save:
	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls1d-adm.tar
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment