Commit e8e9440e authored by Zhang Xiaoli's avatar Zhang Xiaoli
Browse files

finish ooc-exe test.

parent 7bc38a05
# Scalebox agent wrapper around the IFS L1 cube ADM pipeline image.
# NOTE(review): :latest is unpinned — consider pinning a tag/digest for
# reproducible builds.
FROM csst/csst-ifs-l1-cube-adm:latest
LABEL maintainer="Xiaoli Zhang<zhangxiaoli@cnic.cn>"

# Per-task entry script invoked by the agent for each message.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Pull the scalebox agent binaries (goagent) from the base image.
COPY --from=hub.cstcloud.cn/scalebox/base:0.9 /usr/local/sbin /usr/local/sbin

# Artifact directory collected by the agent; -p keeps the build from
# failing if the base image already provides /work.
RUN mkdir -p /work/

# Pipeline scripts expect to run from the workspace directory.
WORKDIR /L1Pipeline/workspace

ENTRYPOINT ["goagent"]
# Build/distribution helpers for the ifs-l1-cube-adm agent image.
IMAGE_NAME:=csst/ifs-l1-cube-adm

# Every target is a command, not a file product.
.PHONY: build dist push run down scp

build:
	docker build --network=host -t $(IMAGE_NAME) .

# Copy the image to worker nodes via a shared filesystem.
# -o (instead of shell redirection) avoids leaving a truncated tar
# behind when `docker save` fails.
dist:
	#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
	docker save $(IMAGE_NAME) -o /share/tmp/ifs-l1-cube-adm.tar
	ssh c0 docker load -i /share/tmp/ifs-l1-cube-adm.tar
	ssh c1 docker load -i /share/tmp/ifs-l1-cube-adm.tar
	ssh c2 docker load -i /share/tmp/ifs-l1-cube-adm.tar

push:
	docker push $(IMAGE_NAME)

# Interactive shell in the image for debugging.
run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# NOTE(review): `docker stop` takes a container name/ID, not an image
# name — confirm a container is actually launched under this name.
down:
	docker stop $(IMAGE_NAME)

scp:
	scp -r ./ csst-zjs:/root/csst/csst-ifs-l1-cube-adm/
#!/bin/bash
# Seed one t_task row per target object into the scalebox database,
# attached to the given job.
#
# Usage: <script> <job_id>

POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi

# Object IDs delivered to the pipeline as the task key_message.
objids=("NGC6217" "NGC6397")
for m in "${objids[@]}"; do
    echo "$m"
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
#!/bin/bash
# Scalebox task hook: run the IFS L1 cube ADM pipeline for one observation.
#   $1 - observation id from the task message.
# python helloworld.py
# obsid="30100000001"
obsid=$1
# Capture pipeline output; quoted to survive empty/odd obsid values.
python l1_pipeline_script_ifs_cube_adm_simdata.py "$obsid" > ./stdout
exit_code=$?
# /work holds per-task artifacts collected by the agent.
mkdir -p /work
# echo "=====csst-l1-ifs-rss.log====" > /work/user-file.txt
# cat /L1Pipeline/workspace/ifs/rss/proc_data/path_RSSlog.txt >> /work/user-file.txt
# rm -rf /L1Pipeline/L0/* /L1Pipeline/L1/*
# mv /tmp/timestamps.txt /work/timestamps.txt
echo finish ifs cube adm.
# Replay the pipeline output so the agent captures it in the task log.
cat ./stdout
# Propagate the pipeline's exit status as the task status.
exit $exit_code
# Scalebox application lifecycle helpers for the adm app.
CLUSTER=csst

.PHONY: all run reset down list

# NOTE(review): `all` depends on a `build` target not defined in this
# Makefile — confirm whether it is provided elsewhere or should be dropped.
all: reset build

run:
	scalebox app create --env-file zjs.env

# make -C replaces the old `cd … && make && cd -` (each recipe line runs
# in its own shell, so the trailing cd was a no-op).
reset:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER)

down:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down

list:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Seed one t_task row per target object into the scalebox database,
# attached to the given job.
#
# Usage: <script> <job_id>

POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi

# Object IDs delivered to the pipeline as the task key_message.
objids=("NGC6217" "NGC6397")
for m in "${objids[@]}"; do
    echo "$m"
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
# Scalebox application spec for the IFS L1 cube ADM pipeline.
# NOTE(review): indentation was flattened by the page scrape — restore
# YAML nesting from the repo before reuse.
name: ifs-l1-cube-adm.app.process
cluster: csst
parameters:
# Start running immediately on creation; no manual activation.
initial_status: RUNNING
jobs:
ifs-l1-cube-adm:
base_image: csst/ifs-l1-cube-adm
# HEAD scheduling mode (scalebox-specific; presumably head-node only — confirm).
schedule_mode: HEAD
variables:
# Max bytes of captured task output text.
output_text_size: 100000
# When output exceeds the limit, keep the tail.
text_tranc_mode: TAIL
locale_mode: NONE
parameters:
# start_message: scalebox
# Split the task key into two groups: first 6 chars, next 3 chars.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# Host paths mounted into the container; values come from the env file.
paths:
- ${AUX_DIR}:/L1Pipeline/aux
- ${CRDS_DIR}:/L1Pipeline/crds
- ${DFS_ROOT}:/L1Pipeline/dfs
# command: docker run -d --network=host %ENVS% %VOLUMES% {{IMAGE}}
# sink_jobs:
# hosts:
# - h0:1
# - c0:2
# - c1:2
# - c2:2
\ No newline at end of file
# Host-side data roots mounted into the pipeline container
# (referenced by the `paths` section of the app spec).
AUX_DIR=/sharewcl/shao/L1Pipeline/aux
CRDS_DIR=/sharewcl/shao/L1Pipeline/crds
DFS_ROOT=/sharewcl/shao/L1Pipeline/dfs
\ No newline at end of file
# Scalebox agent wrapper around the IFS L1 cube EXE pipeline image.
# NOTE(review): :latest is unpinned — consider pinning a tag/digest for
# reproducible builds.
FROM csst/csst-ifs-l1-cube-exe:latest
LABEL maintainer="Xiaoli Zhang<zhangxiaoli@cnic.cn>"

# Per-task entry script invoked by the agent for each message.
COPY run.sh /app/bin/
RUN chmod +x /app/bin/run.sh

# Pull the scalebox agent binaries (goagent) from the base image.
COPY --from=hub.cstcloud.cn/scalebox/base:0.9 /usr/local/sbin /usr/local/sbin

# Artifact directory collected by the agent; -p keeps the build from
# failing if the base image already provides /work.
RUN mkdir -p /work/

# Pipeline scripts expect to run from the workspace directory.
WORKDIR /L1Pipeline/workspace

ENTRYPOINT ["goagent"]
# Build/distribution helpers for the ifs-l1-cube-exe agent image.
IMAGE_NAME:=csst/ifs-l1-cube-exe

# Every target is a command, not a file product.
.PHONY: build dist push run down scp

build:
	docker build --network=host -t $(IMAGE_NAME) .

# Copy the image to worker nodes via a shared filesystem.
# -o (instead of shell redirection) avoids leaving a truncated tar
# behind when `docker save` fails.
dist:
	#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
	docker save $(IMAGE_NAME) -o /share/tmp/ifs-l1-cube-exe.tar
	ssh c0 docker load -i /share/tmp/ifs-l1-cube-exe.tar
	ssh c1 docker load -i /share/tmp/ifs-l1-cube-exe.tar
	ssh c2 docker load -i /share/tmp/ifs-l1-cube-exe.tar

push:
	docker push $(IMAGE_NAME)

# Interactive shell in the image for debugging.
run:
	docker run -it --entrypoint bash $(IMAGE_NAME)

# NOTE(review): `docker stop` takes a container name/ID, not an image
# name — confirm a container is actually launched under this name.
down:
	docker stop $(IMAGE_NAME)

scp:
	scp -r ./ csst-zjs:/root/csst/csst-ifs-l1-cube-exe/
#!/bin/bash
# Seed one t_task row per target object into the scalebox database,
# attached to the given job.
#
# Usage: <script> <job_id>

POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi

# Object IDs delivered to the pipeline as the task key_message.
objids=("NGC6217" "NGC6397")
for m in "${objids[@]}"; do
    echo "$m"
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
#!/bin/bash
# Scalebox task hook: run the pipeline for one observation.
#   $1 - observation id from the task message.
# NOTE(review): this cube-exe wrapper invokes the *rss* simdata script —
# confirm the script name is intentional. Unlike the adm twin it also
# never replays ./stdout into the task log.
# python helloworld.py
# obsid="30100000001"
obsid=$1
# Capture pipeline output; quoted to survive empty/odd obsid values.
python l1_pipeline_script_ifs_rss_simdata.py "$obsid" > ./stdout
exit_code=$?
# /work holds per-task artifacts collected by the agent.
mkdir -p /work
# echo "=====csst-l1-ifs-rss.log====" > /work/user-file.txt
# cat /L1Pipeline/workspace/ifs/rss/proc_data/path_RSSlog.txt >> /work/user-file.txt
# rm -rf /L1Pipeline/L0/* /L1Pipeline/L1/*
# mv /tmp/timestamps.txt /work/timestamps.txt
echo finish ifs cube exe.
# Propagate the pipeline's exit status as the task status.
exit $exit_code
# Scalebox application lifecycle helpers for the exe app.
CLUSTER=csst

.PHONY: all run reset down list

# NOTE(review): `all` depends on a `build` target not defined in this
# Makefile — confirm whether it is provided elsewhere or should be dropped.
all: reset build

run:
	scalebox app create --env-file zjs.env

# make -C replaces the old `cd … && make && cd -` (each recipe line runs
# in its own shell, so the trailing cd was a no-op).
reset:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER)

down:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) down

list:
	make -C ${HOME}/docker-scalebox/clusters/$(CLUSTER) list
#!/bin/bash
# Seed one t_task row per target object into the scalebox database,
# attached to the given job.
#
# Usage: <script> <job_id>

POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
PGPORT=5432

job_id=$1
if [ -z "$job_id" ]; then
    echo "usage: $0 <job_id>" >&2
    exit 1
fi

# Object IDs delivered to the pipeline as the task key_message.
objids=("NGC6217" "NGC6397")
for m in "${objids[@]}"; do
    echo "$m"
    docker exec -t database psql -U "${POSTGRES_USER}" -h "${POSTGRES_HOST}" -d "${POSTGRES_DB}" -p "${PGPORT}" \
        -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
# Scalebox application spec for the IFS L1 cube EXE pipeline.
# NOTE(review): indentation was flattened by the page scrape — restore
# YAML nesting from the repo before reuse.
name: ifs-l1-cube-exe.app.process
cluster: csst
parameters:
# Start running immediately on creation; no manual activation.
initial_status: RUNNING
jobs:
ifs-l1-cube-exe:
base_image: csst/ifs-l1-cube-exe
# HEAD scheduling mode (scalebox-specific; presumably head-node only — confirm).
schedule_mode: HEAD
variables:
# Max bytes of captured task output text.
output_text_size: 100000
# When output exceeds the limit, keep the tail.
text_tranc_mode: TAIL
locale_mode: NONE
parameters:
# start_message: scalebox
# Split the task key into two groups: first 6 chars, next 3 chars.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
# Host paths mounted into the container; values come from the env file.
paths:
- ${AUX_DIR}:/L1Pipeline/aux
- ${CRDS_DIR}:/L1Pipeline/crds
- ${DFS_ROOT}:/L1Pipeline/dfs
# command: docker run -d --network=host %ENVS% %VOLUMES% {{IMAGE}}
# sink_jobs:
# hosts:
# - h0:1
# - c0:2
# - c1:2
# - c2:2
\ No newline at end of file
# Host-side data roots mounted into the pipeline container
# (referenced by the `paths` section of the app spec).
AUX_DIR=/sharewcl/shao/L1Pipeline/aux
CRDS_DIR=/sharewcl/shao/L1Pipeline/crds
DFS_ROOT=/sharewcl/shao/L1Pipeline/dfs
\ No newline at end of file
#!/bin/bash #!/bin/bash
rm -rf /pipeline/input/* /pipeline/output/* rm -rf /pipeline/input/* /pipeline/output/*
echo input:$1* # echo input:$1*
python /pipeline/src/exe.py $1 # python /pipeline/src/exe.py $1
python /pipeline/src/run.py
exit_code=$? exit_code=$?
......
CLUSTER=csst CLUSTER=local
all: reset build all: reset build
......
name: msc-l1-ooc.app.process name: msc-l1-ooc.app.process
label: 在轨定标 label: 在轨定标
comment: 主巡天在轨定标一级流水线 comment: 主巡天在轨定标一级流水线
cluster: csst cluster: local
parameters: parameters:
initial_status: RUNNING initial_status: RUNNING
jobs: jobs:
csst-msc-l1-ooc-adm: # csst-msc-l1-ooc-adm:
label: 在轨定标管理模块 # label: 在轨定标管理模块
base_image: cnic/csst-msc-l1-ooc-adm:latest # base_image: cnic/csst-msc-l1-ooc-adm:latest
schedule_mode: HEAD # schedule_mode: HEAD
variables: # variables:
# always_running: yes # # always_running: yes
output_text_size: 100000 # output_text_size: 100000
# DFS入库错误重试3次 # # DFS入库错误重试3次
retry_rules: "['202:3']" # retry_rules: "['202:3']"
text_tranc_mode: TAIL # text_tranc_mode: TAIL
locale_mode: NONE # locale_mode: NONE
# grpc_server: 172.24.23.6:50051 # # grpc_server: 172.24.23.6:50051
parameters: # parameters:
start_message: ANY # start_message: ANY
environments: # environments:
- CSST_DFS_API_MODE=cluster # - CSST_DFS_API_MODE=cluster
- CSST_DFS_GATEWAY=172.24.27.2:30880 # - CSST_DFS_GATEWAY=172.24.27.2:30880
- CSST_DFS_APP_ID=test # - CSST_DFS_APP_ID=test
- CSST_DFS_APP_TOKEN=test # - CSST_DFS_APP_TOKEN=test
paths: # change /share to /sharewcl in PML production environment # - CRDS_SERVER_URL=http://172.24.27.2:29000
- ${CSST_AUX_DIR}:/pipeline/aux # paths: # change /share to /sharewcl in PML production environment
- ${CSST_DFS_ROOT}:/dfsroot # - ${CSST_AUX_DIR}:/pipeline/aux
- ${CSST_CRDS_ROOT}:/crdsroot # - ${CSST_DFS_ROOT}:/dfsroot
sink_jobs: # - ${CSST_CRDS_ROOT}:/crdsroot
- csst-msc-l1-ooc-exe # sink_jobs:
# - csst-msc-l1-ooc-exe
# command: > # command: >
# bash -c "python /pipeline/src/run.py && echo '---' && cat /pipeline/output/msg.txt" # bash -c "python /pipeline/src/run.py && echo '---' && cat /pipeline/output/msg.txt"
...@@ -39,8 +40,10 @@ jobs: ...@@ -39,8 +40,10 @@ jobs:
label: 在轨定标执行模块 label: 在轨定标执行模块
base_image: cnic/csst-msc-l1-ooc-exe:latest base_image: cnic/csst-msc-l1-ooc-exe:latest
schedule_mode: HEAD schedule_mode: HEAD
parameters:
start_message: ANY
variables: variables:
always_running: yes # always_running: yes
output_text_size: 100000 output_text_size: 100000
# DFS入库错误重试3次 # DFS入库错误重试3次
retry_rules: "['202:3']" retry_rules: "['202:3']"
...@@ -52,6 +55,7 @@ jobs: ...@@ -52,6 +55,7 @@ jobs:
- CSST_DFS_GATEWAY=172.24.27.2:30880 - CSST_DFS_GATEWAY=172.24.27.2:30880
- CSST_DFS_APP_ID=test - CSST_DFS_APP_ID=test
- CSST_DFS_APP_TOKEN=test - CSST_DFS_APP_TOKEN=test
- CRDS_SERVER_URL=http://172.24.27.2:29000
paths: # change /share to /sharewcl in PML production environment paths: # change /share to /sharewcl in PML production environment
# output directory, omit this line in production # output directory, omit this line in production
- ${CSST_AUX_DIR}:/pipeline/aux - ${CSST_AUX_DIR}:/pipeline/aux
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment