Commit 5031945b authored by Zhang Xiaoli

add hstdm-l2

parent fe641370
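# Wrap the csst/csst-hstdm-l2 pipeline image with the scalebox agent (goagent) and the run.sh task script.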
FROM csst/csst-hstdm-l2:latest
USER root
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh
COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin /usr/local/sbin
RUN mkdir /work/
WORKDIR /work/
ENTRYPOINT ["goagent"]
IMAGE_NAME:=cnic/csst-hstdm-l2
build:
	docker build --network=host -t $(IMAGE_NAME) .
dist:
	# docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
	docker save $(IMAGE_NAME) > /share/tmp/hstdm-l2.tar
	ssh c0 docker load -i /share/tmp/hstdm-l2.tar
	ssh c1 docker load -i /share/tmp/hstdm-l2.tar
	ssh c2 docker load -i /share/tmp/hstdm-l2.tar
push:
	docker push $(IMAGE_NAME)
run:
	docker run -it --entrypoint bash $(IMAGE_NAME)
down:
	# docker stop expects a container, not an image; stop any container started from this image
	docker ps -q --filter ancestor=$(IMAGE_NAME) | xargs -r docker stop
scp:
	scp -r ./ csst-zjs:/root/csst/csst-hstdm-l2/
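# Typical workflow with the targets above (a sketch; the c0/c1/c2 hosts and the
# shared /share/tmp path come from the dist target and are assumed to be reachable):
#
#   make build        # build cnic/csst-hstdm-l2 locally
#   make dist         # save the image and load it on c0, c1 and c2
#   make run          # open an interactive shell in the image for debugging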
#!/bin/bash
# Run the HSTDM pipeline for a single observation.
# Expected obsid range: 50100000001 - 50100000002
obsid=$1
python /pipeline/src/run.py "$obsid"
exit_code=$?

# Keep the timestamp log produced by the pipeline run, if it exists.
timefile=/pipeline/output/timestamps.txt
if test -f "$timefile"; then
    echo "$timefile exists"
    mv "$timefile" /work/timestamps.txt
fi

# Clean up the task directories for the next message.
rm -rf /pipeline/input/* /pipeline/output/*
echo "finished hstdm."
exit $exit_code
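# Hypothetical manual smoke test (the goagent entrypoint presumably passes the obsid
# from the task message as $1; the image name and obsid below are examples only, and
# the DFS/CRDS volumes from the app config would also need to be mounted):
#
#   docker run --rm -it --entrypoint bash cnic/csst-hstdm-l2
#   /app/bin/run.sh 50100000001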
CLUSTER=csst
all: reset build
run:
	scalebox app create --env-file zjs.env
reset:
	cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
down:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down
list:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Enqueue one task per obsid for a scalebox job by inserting rows into the
# t_task table of the scalebox database container.
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
for m in {50100000001..50100000002}; do
    echo $m
    docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT} \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
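# Hypothetical usage (the script name and job id are placeholders; the job id is
# assumed to be the numeric id of the target job in the scalebox database):
#
#   ./insert-tasks.sh 42
#
# Each iteration inserts one obsid (50100000001..50100000002) as a task key for that job.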
name: hstdm.app.process
label: Terahertz
comment: terahertz L1+L2 pipeline of the precision measurement module
cluster: local
parameters:
  initial_status: RUNNING
jobs:
  hstdm-l1:
    label: Terahertz
    base_image: cnic/csst-hstdm-l1
    schedule_mode: HEAD
    variables:
      # always_running: yes
      # reserved_on_exit: yes
      output_text_size: 100000
      text_tranc_mode: TAIL
      locale_mode: NONE
    parameters:
      # start_message: 50100000001
      key_group_regex: ^(.{6})(.{3})$
      key_group_seq: 1,2
    environments:
      - CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
      - CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
      - CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
      - CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
      - CRDS_SERVER_URL=${CRDS_SERVER_URL}
    paths:
      - ${CSST_AUX_DIR}:/pipeline/aux
      - ${CSST_DFS_ROOT}:/dfsroot
      - ${CSST_CRDS_ROOT}:/crdsref
    # command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
    sink_jobs:
      - hstdm-l2
    # hosts:
    #   - h0:1
    #   - c0:1
    #   - c1:1
    #   - c2:1
  hstdm-l2:
    label: Terahertz L2
    base_image: cnic/csst-hstdm-l2
    schedule_mode: HEAD
    variables:
      always_running: yes
      reserved_on_exit: yes
      output_text_size: 100000
      text_tranc_mode: TAIL
      locale_mode: NONE
    parameters:
      # start_message: 50100000001
      key_group_regex: ^(.{6})(.{3})$
      key_group_seq: 1,2
    environments:
      - CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
      - CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
      - CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
      - CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
      - CRDS_SERVER_URL=${CRDS_SERVER_URL}
    paths:
      - ${CSST_AUX_DIR}:/pipeline/aux
      - ${CSST_DFS_ROOT}:/dfsroot
      - ${CSST_CRDS_ROOT}:/crdsref
    # command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
    # hosts:
    #   - h0:1
    #   - c0:1
    #   - c1:1
    #   - c2:1
AUX_DIR=/goosefsx/x_c60_o19xp6c1_proxy/L1Pipeline/aux
CRDS_DIR=/goosefsx/x_c60_o19xp6c1_proxy/L1Pipeline/aux/products_ref20_3hdr
DFS_ROOT=/goosefsx/x_c60_o19xp6c1_proxy/dfs_root
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/share/dfs
CSST_CRDS_ROOT=/share/crdsdata/data
CSST_AUX_DIR=/share/pipeline/aux
# DFS
CSST_DFS_API_MODE=cluster
CSST_DFS_GATEWAY=172.24.27.2:30880
CSST_DFS_APP_ID=test
CSST_DFS_APP_TOKEN=test
# CRDS
CRDS_SERVER_URL=http://172.24.27.2:29000
# VOLUMES ==================
CSST_DFS_ROOT=/sharewcl/dfs
CSST_CRDS_ROOT=/sharewcl/crdsdata/data/references
CSST_AUX_DIR=/sharewcl/pipeline/aux
IMAGE_ADM:=cnic/csst-msc-l1-mbi
build:
	make -C adm build
	make -C exe build
sync:
	rsync -av --del . csst-zjs:/tmp/csst-msc-l2-xcat
dist:
	docker save hub.cstcloud.cn/scalebox/base:latest | zstdmt | pv | ssh csst-zjs 'zstd -d | docker load'
	# docker save hub.cstcloud.cn/scalebox/controld:latest | zstdmt | pv | ssh csst-zjs 'zstd -d | docker load'
	# docker save hub.cstcloud.cn/scalebox/database:latest | zstdmt | pv | ssh csst-zjs 'zstd -d | docker load'
# NOTE: IMAGE_NAME is not defined in this Makefile; set it on the command line, e.g. make push IMAGE_NAME=...
push:
	docker push $(IMAGE_NAME)
run:
	docker run -it --entrypoint bash $(IMAGE_NAME)
down:
	docker stop $(IMAGE_NAME)
@@ -4,7 +4,7 @@ USER root
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh
-COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin/ /usr/local/sbin/
+COPY --from=hub.cstcloud.cn/scalebox/base:0.9 /usr/local/sbin/ /usr/local/sbin/
RUN mkdir -p /work && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc
# RUN mkdir -p /work /app/bin && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc
WORKDIR /work
name: msc-l2-xcat.app.process
label: catalog retrieval
comment: catalog retrieval
cluster: csst
parameters:
  initial_status: RUNNING
jobs:
  # msc-l1-mbi:
  #   label: catalog management module
  #   base_image: cnic/csst-msc-l1-mbi
  #   schedule_mode: HEAD
  #   variables:
  #     always_running: yes
  #     output_text_size: 100000
  #     text_tranc_mode: TAIL
  #     locale_mode: NONE
  #   paths:
  #     - ${AUX_DIR}:/pipeline/aux
  #     - ${CRDS_DIR}:/L1Pipeline/aux/C6.1_ref_crds
  #     - ${DFS_ROOT}:/dfsroot
  csst-msc-l2-xcat-adm:
    label: catalog management module
    base_image: cnic/csst-msc-l2-xcat-adm:latest
    schedule_mode: HEAD
    variables:
      # always_running: yes
      output_text_size: 100000
      text_tranc_mode: TAIL
      locale_mode: NONE
    parameters:
      start_message: 202306212339
    environments:
      - CSST_DFS_API_MODE=cluster
      - CSST_DFS_GATEWAY=172.24.27.2:30880
      - CSST_DFS_APP_ID=test
      - CSST_DFS_APP_TOKEN=test
    paths: # change /share to /sharewcl in the PML production environment
      - /sharewcl/pipeline/output-csst-msc-l2-xcat-adm:/pipeline/output # output directory, omit this line in production
      - /sharewcl/pipeline/aux:/pipeline/aux
      - /sharewcl/dfs:/dfsroot
      - /sharewcl/crdsdata/data/references:/crdsref
    sink_jobs:
      - csst-msc-l2-xcat-exe
    # command: >
    #   bash -c "python /pipeline/src/run.py && echo '---' && cat /pipeline/output/msg.txt"
    hosts:
      # - h0:1
      # - c0:1
      # - c1:1
      # - c2:1
  csst-msc-l2-xcat-exe:
    base_image: cnic/csst-msc-l2-xcat-exe:latest
    # schedule_mode: HEAD
    variables:
      always_running: yes
      output_text_size: 100000
      text_tranc_mode: TAIL
      locale_mode: NONE
    environments:
      - CSST_DFS_API_MODE=cluster
      - CSST_DFS_GATEWAY=172.24.27.2:30880
      - CSST_DFS_APP_ID=test
      - CSST_DFS_APP_TOKEN=test
    paths: # change /share to /sharewcl in the PML production environment
      # output directory, omit this line in production
      - /sharewcl/pipeline/output-csst-msc-l2-xcat-exe:/pipeline/output
      - /sharewcl/pipeline/aux:/pipeline/aux
      - /sharewcl/dfs:/dfsroot
      - /sharewcl/crdsdata/data/references:/crdsref
    # command: >
    #   python /pipeline/src/run.py {\"brick_id\":1}
    hosts:
      # - h0:1
      # - c0:1
      # - c1:1
      - c2:2
@@ -4,7 +4,7 @@ USER root
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh
-COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin/ /usr/local/sbin/
+COPY --from=hub.cstcloud.cn/scalebox/base:0.9 /usr/local/sbin/ /usr/local/sbin/
RUN mkdir -p /work && echo "PATH=/app/bin:\${PATH}" >> /root/.bashrc
WORKDIR /work
@@ -37,6 +37,8 @@ jobs:
# schedule_mode: HEAD
variables:
# always_running: yes
+# retry DFS ingestion errors 3 times
+retry_rules: "['202:3']"
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
@@ -54,6 +56,6 @@ jobs:
# python /pipeline/src/run.py {\"brick_id\":1}
hosts:
# - h0:1
-- c0:1
-- c1:1
-- c2:1
\ No newline at end of file
+# - c0:1
+# - c1:1
+- c2:2
\ No newline at end of file