Commit c8f56ea3 authored by Zhang Xiaoli

Multi-color imaging (MBI) and slitless spectroscopy (SLS) now take their input parameters from the entries of a designated text file: obsid+chipid

parent afdb7cf7
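The commit replaces hard-coded obsid loops in the batch-submission scripts with a message file read one line at a time. A minimal sketch of the assumed layout (hypothetical values; the real files referenced below are msg-mbi-all.txt and msg-sls-all.txt under /nfsdata/share/pipeline-inttest/run-batch-msc/):

    # assumed layout: one task per line, "obsid chipid" separated by whitespace
    10160000000 06
    10160000000 07
    10160000001 08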
Dockerfile (msc-l1-mbi):
-FROM csst/csst-msc-l1-mbi:latest
+FROM 10.3.10.10:8113/csst/csst-msc-l1-mbi
 USER root
 COPY run.sh /app/bin/
......
Makefile (msc-l1-mbi):
@@ -3,10 +3,10 @@ IMAGE_PATH:=/nfsdata/tmp
 build:
 	docker build --network=host -t $(IMAGE_NAME) .
-dist: build
+dist:
 	#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
 	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-mbi.tar
-	ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
+	# ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
 	# ssh c1 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
 	# ssh c2 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
 push:
......
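With the base image now coming from the 10.3.10.10:8113 registry, the ssh-based load onto c0 is commented out of the dist target. If a worker node needs the image without going through the shared tar, a direct pull is the likely alternative (a sketch, not part of the commit; assumes the registry is reachable from the node and, if it serves plain HTTP, is whitelisted under insecure-registries in /etc/docker/daemon.json):

    # pull the pipeline image straight from the private registry on node c0
    ssh c0 docker pull 10.3.10.10:8113/csst/csst-msc-l1-mbi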
run.sh (msc-l1-mbi):
@@ -7,16 +7,17 @@ detector="${arr[1]}"
 echo "obsid: "$obsid
 echo "detector: "$detector
+cd /pipeline/output/
 python /pipeline/app/run.py $obsid $detector
 exit_code=$?
-echo "=====pipeline.log====" > /work/user-file.txt
-cat /pipeline/output/pipeline.log >> /work/user-file.txt
-echo "======module.log======" >> /work/user-file.txt
-cat /pipeline/output/module.log|tail -n 100 >> /work/user-file.txt
+echo "=====pipeline.log====" > /work/custom-out.txt
+cat /pipeline/output/pipeline.log >> /work/custom-out.txt
+echo "======module.log======" >> /work/custom-out.txt
+cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
 timefile=/pipeline/output/timestamp.txt
......
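The hunk context (detector="${arr[1]}") shows that run.sh receives a single obsid-detector token and splits it on the dash; the sls run.sh later in this commit spells the split out. A standalone sketch of that parsing with a hypothetical message:

    #!/bin/bash
    # split a task message of the form "<obsid>-<detector>" into its two fields
    msg="10160000000-06"                # hypothetical key_message from t_task
    arr=($(echo $msg | tr "-" " "))     # dash -> space, then word splitting fills the array
    echo "obsid: ${arr[0]}"             # prints: obsid: 10160000000
    echo "detector: ${arr[1]}"          # prints: detector: 06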
Task-submission script (msc-l1-mbi):
 #!/bin/bash
 POSTGRES_USER=scalebox
 POSTGRES_HOST=localhost
 POSTGRES_DB=scalebox
@@ -8,11 +8,17 @@ PGPORT=5432
 job_id=$1
-for m in {10160000000..10160000001}; do
-for n in 06 07 08 09 11 12 13 14 15 16 17 18 19 20 22 23 24 25 ; do
-echo $m $n
-msg=$m-$n
+obsid="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-mbi-all.txt"
+while IFS= read -r line
+do
+m="$line"
+arr=($(echo $m | tr " " " "))
+obsid="${arr[0]}"
+detector="${arr[1]}"
+msg=$obsid-$detector
+echo $msg
 docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT}\
 -c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
-done
-done
+done < $obsid
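The new loop silently submits nothing if the message file is absent or empty. A small hedged guard (a hypothetical addition, not part of the commit) that fails fast instead:

    # refuse to submit tasks when the message file is missing or empty
    msgfile="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-mbi-all.txt"
    [ -s "$msgfile" ] || { echo "missing or empty: $msgfile" >&2; exit 1; }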
msc-l1-mbi app YAML:
@@ -33,10 +33,9 @@ jobs:
       - ${CCDS_ROOT}:/ccds_root:ro
       - ${CSST_AST_TEMP}:/pipeline/temp:rw
     # command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
-    # sink_jobs:
     hosts:
-      - h0:1
-      # - c0:1
+      - h0:10
+      - c0:20
       # - c1:1
       # - c2:1
......
Dockerfile (msc-l1-sls):
-FROM csst/csst-msc-l1-sls2d:latest
+FROM 10.3.10.10:8113/csst/csst-msc-l1-sls
 USER root
 COPY run.sh /app/bin/
@@ -6,6 +6,11 @@ RUN chmod +x /app/bin/run.sh
 COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin /usr/local/sbin
 RUN mkdir /work/
+RUN chown -R csst:csst /work
+RUN chown -R csst:csst /pipeline
 WORKDIR /work/
+USER csst
 ENTRYPOINT ["goagent"]
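The added chown and USER lines drop the agent from root to the csst account, so everything written under /work and /pipeline stays owned by csst. A quick check that the built image really runs unprivileged (a sketch; overrides the goagent entrypoint with id, using the IMAGE_NAME from the Makefile below):

    # print the effective uid/gid inside the container; expect the csst user
    docker run --rm --entrypoint id cnic/csst-msc-l1-sls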
Makefile (msc-l1-sls):
-IMAGE_NAME:=cnic/csst-msc-l1-sls2d
-IMAGE_PATH:=/goosefsx/x-c70-y4s971cs-proxy/temp
+IMAGE_NAME:=cnic/csst-msc-l1-sls
+IMAGE_PATH:=/nfsdata/tmp
 build:
 	docker build --network=host -t $(IMAGE_NAME) .
 dist: build
 	#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
-	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls2d.tar
-	ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-sls2d.tar
+	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls.tar
+	ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-sls.tar
 push:
 	docker push $(IMAGE_NAME)
@@ -15,6 +15,6 @@ run:
 down:
 	docker stop $(IMAGE_NAME)
 scp:
-	scp -r ./ tencent-p0:/root/csst/csst-msc-l1-sls2d/
+	scp -r ./ tencent-p0:/root/csst/csst-msc-l1-sls/
 save:
-	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls2d.tar
+	docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls.tar
run.sh (msc-l1-sls):
 #!/bin/bash
-# obsid 100000100 ~ 100000154
-## stress test: keep the last three digits, map the leading six back to the original 100000 format
-# test_obsid=$1
-# obsid="100000"${test_obsid:0-3:3}
-###
-# obsid 10160000000 - 10160000136
-obsid=$1
-#python /L1Pipeline/build/csst_l1/app/l1_sls2d_tcc.py $obsid > /work/stdout 2> /work/stderr
-# l1-sls2d --obs-id=$obsid --device=cpu --n-jobs=18 --n-jobs-gpu=18 --clean-l0 --clean-l1 --cleanup --dfs-node=pml --ver-sim=C6.2 --no-photometry --no-astrometry > /work/stdout 2> /work/stderr
-python /pipeline/src/run.py \
---obs-id=$obsid \
---device=cpu \
---n-jobs=12 \
---n-jobs-gpu=6 \
---clean-l0 \
---clean-l1
-# --dfs-node=pml \
-# --ver-sim=C6.2
+arr=($(echo $1 | tr "-" " "))
+obsid="${arr[0]}"
+detector="${arr[1]}"
+echo "obsid: "$obsid
+echo "detector: "$detector
+cd /pipeline/output/
+python /pipeline/app/run.py $obsid $detector
 exit_code=$?
-echo "=====csst-l1ppl.log====" > /work/user-file.txt
-cat /pipeline/output/csst-l1ppl.log >> /work/user-file.txt
-echo "======csst-l1mod.log======" >> /work/user-file.txt
-cat /pipeline/output/csst-l1mod.log|tail -n 100 >> /work/user-file.txt
+echo "=====pipeline.log====" > /work/custom-out.txt
+cat /pipeline/output/pipeline.log >> /work/custom-out.txt
+echo "======module.log======" >> /work/custom-out.txt
+cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
 timefile=/pipeline/output/timestamps.txt
 if test -f "$timefile"; then
@@ -34,6 +25,6 @@ fi
 rm -rf /pipeline/input/* /pipeline/output/*
-echo finish sls2d.
+echo finish sls.
 exit $exit_code
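As in the mbi run.sh, exit_code is captured on the line immediately after the python call and only used at the very end. That ordering matters: $? holds the status of the most recent command, so the log collection has to happen after the capture or it would overwrite the pipeline's status. A minimal illustration:

    #!/bin/bash
    # capture the status immediately; later commands must not clobber it
    false                    # stand-in for the pipeline run, exits with 1
    exit_code=$?             # exit_code=1, captured before anything else runs
    echo "collecting logs"   # succeeds (status 0), but exit_code is already saved
    exit $exit_code          # the job still reports the pipeline's failure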
Application Makefile (msc-l1-sls):
-CLUSTER=csst
+CLUSTER=csst-nao
 # all: reset build
 run:
-	scalebox app create --env-file tencent.env
+	PGPORT=9090 scalebox app create --env-file nao.env
 reset:
 	cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
......
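The run target now points at nao.env and sets PGPORT=9090 for the client. The env file presumably supplies the variables the app YAML below substitutes; a hypothetical sketch with placeholder values only (none of these values appear in the commit):

    # nao.env - hypothetical contents; every value here is a placeholder
    CSST_DFS_GATEWAY=<dfs-gateway-host:port>
    CSST_DFS_APP_ID=<app-id>
    CSST_DFS_APP_TOKEN=<app-token>
    CCDS_SERVER_URL=<ccds-url>
    CSST_AUX_ROOT=/nfsdata/<aux-path>
    CSST_DFS_ROOT=/nfsdata/<dfs-path>
    CCDS_ROOT=/nfsdata/<ccds-path>
    CSST_AST_TEMP=/nfsdata/<temp-path>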
Task-submission script (msc-l1-sls):
@@ -8,9 +8,16 @@ PGPORT=5432
 job_id=$1
-for m in {10160000000..10160000136}; do
-echo $m
-docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT} \
--c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
-done
+obsid="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-sls-all.txt"
+while IFS= read -r line
+do
+m="$line"
+arr=($(echo $m | tr " " " "))
+obsid="${arr[0]}"
+detector="${arr[1]}"
+msg=$obsid-$detector
+echo $msg
+docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT}\
+-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
+done < $obsid
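A subtlety in this loop: obsid first holds the message-file path and is then overwritten with each line's observation id, yet done < $obsid keeps reading the right file. That works because bash resolves the redirection once, when the loop starts, not on every iteration. A self-contained demonstration:

    #!/bin/bash
    # the redirection target is resolved before the loop body ever runs
    f=$(mktemp)
    printf '10160000000 06\n10160000001 07\n' > "$f"
    saved=$f
    while IFS= read -r line; do
        f="somewhere/else"       # clobbering the variable mid-loop changes nothing
        echo "read: $line"
    done < "$f"                  # opened exactly once, before the first read
    rm -f "$saved"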
msc-l1-sls app YAML:
-name: msc-l1-sls2d.app.process
-label: slitless spectroscopy 2D
-comment: main-survey slitless-spectroscopy 2D Level-1 pipeline
-cluster: csst
+name: msc-l1-sls.app.process
+label: slitless spectroscopy
+comment: main-survey slitless-spectroscopy Level-1 pipeline
+cluster: csst-nao
 parameters:
   initial_status: RUNNING
 jobs:
-  msc-l1-sls2d:
-    label: slitless spectroscopy 2D
-    base_image: cnic/csst-msc-l1-sls2d
-    schedule_mode: HEAD
+  msc-l1-sls:
+    label: slitless spectroscopy
+    base_image: cnic/csst-msc-l1-sls
+    # schedule_mode: HEAD
     variables:
-      always_running: yes
-      reserved_on_exit: yes
+      # always_running: yes
+      # reserved_on_exit: yes
       # retry DFS ingestion errors up to 3 times
-      retry_rules: "['202:3']"
+      # retry_rules: "['202:3']"
       output_text_size: 100000
       text_tranc_mode: TAIL
       locale_mode: NONE
@@ -27,14 +27,15 @@ jobs:
       - CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
       - CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
       - CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
-      - CRDS_SERVER_URL=${CRDS_SERVER_URL}
+      - CCDS_SERVER_URL=${CCDS_SERVER_URL}
     paths:
-      - ${CSST_AUX_DIR}:/pipeline/aux
-      - ${CSST_DFS_ROOT}:/dfsroot
-      - ${CSST_CRDS_ROOT}:/crdsroot
+      - ${CSST_AUX_ROOT}:/pipeline/aux:ro
+      - ${CSST_DFS_ROOT}:/dfs_root:ro
+      - ${CCDS_ROOT}:/ccds_root:ro
+      - ${CSST_AST_TEMP}:/pipeline/temp:rw
     # sink_jobs:
-    # hosts:
-    #   - h0:1
-    #   - c0:3
+    hosts:
+      - h0:10
+      - c0:20
       # - c1:3
       # - c2:3
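The environment block renames CRDS_SERVER_URL to CCDS_SERVER_URL, and the mounts now come from CSST_AUX_ROOT and CCDS_ROOT, so stale CRDS-era variables in an old env file no longer help. A hedged pre-flight check (a sketch, not part of the commit) before running scalebox app create:

    # verify every variable the YAML substitutes is actually set
    for v in CSST_DFS_GATEWAY CSST_DFS_APP_ID CSST_DFS_APP_TOKEN \
             CCDS_SERVER_URL CSST_AUX_ROOT CSST_DFS_ROOT CCDS_ROOT CSST_AST_TEMP; do
        [ -n "${!v}" ] || echo "unset: $v" >&2
    done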