Commit c8f56ea3 authored by Zhang Xiaoli's avatar Zhang Xiaoli
Browse files

多色成像和无缝光谱输入参数为指定文本中的信息:obsid+chipid

parent afdb7cf7
# NOTE(review): diff capture — the first FROM is the old base image (removed)
# and the second is its replacement pulled from the private registry 10.3.10.10:8113.
FROM csst/csst-msc-l1-mbi:latest
FROM 10.3.10.10:8113/csst/csst-msc-l1-mbi
USER root
# Install the task entry script into the agent's bin directory.
COPY run.sh /app/bin/
......
......@@ -3,10 +3,10 @@ IMAGE_PATH:=/nfsdata/tmp
# Build the MBI pipeline image on the host network (proxy/DNS friendly).
build:
docker build --network=host -t $(IMAGE_NAME) .
# NOTE(review): diff capture — `dist: build` (old) became plain `dist:` (new),
# so `make dist` no longer rebuilds the image first. Confirm this is intended.
dist: build
dist:
#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
# Save the image tarball to the shared path; the active per-node
# `ssh c0 docker load` line was replaced by commented-out fan-out lines.
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-mbi.tar
ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
# ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
# ssh c1 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
# ssh c2 docker load -i $(IMAGE_PATH)/msc-l1-mbi.tar
push:
......
......@@ -7,16 +7,17 @@ detector="${arr[1]}"
# Run the MBI pipeline for one obsid/detector, then assemble a log report.
echo "obsid: "$obsid
echo "detector: "$detector
cd /pipeline/output/
python /pipeline/app/run.py $obsid $detector
# Preserve the pipeline's exit status; it is the task's final result.
exit_code=$?
# NOTE(review): diff capture — the report file was renamed
# /work/user-file.txt (old) -> /work/custom-out.txt (new); the log sources
# are unchanged.
echo "=====pipeline.log====" > /work/user-file.txt
cat /pipeline/output/pipeline.log >> /work/user-file.txt
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
echo "======module.log======" >> /work/user-file.txt
cat /pipeline/output/module.log|tail -n 100 >> /work/user-file.txt
echo "======module.log======" >> /work/custom-out.txt
# Only the last 100 lines of the module log are kept.
cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
timefile=/pipeline/output/timestamp.txt
......
#!/bin/bash
# Seed scalebox t_task rows for the MBI pipeline: one task per obsid-chipid key.
POSTGRES_USER=scalebox
POSTGRES_HOST=localhost
POSTGRES_DB=scalebox
......@@ -8,11 +8,17 @@ PGPORT=5432
# First CLI argument: the scalebox job id the tasks are attached to.
job_id=$1
# Old (removed) generator: cartesian product of a hard-coded obsid range and
# a fixed detector list.
for m in {10160000000..10160000001}; do
for n in 06 07 08 09 11 12 13 14 15 16 17 18 19 20 22 23 24 25 ; do
echo $m $n
msg=$m-$n
docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT}\
-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
done
# New generator: read "obsid chipid" pairs from a text file (per the commit
# message) and insert one "obsid-chipid" key_message per line.
# NOTE(review): `obsid` first holds the file path and is then reassigned per
# line inside the loop — a distinct name (e.g. msg_file) would be clearer.
obsid="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-mbi-all.txt"
while IFS= read -r line
do
m="$line"
# NOTE(review): `tr " " " "` is a no-op — word splitting of the unquoted
# command substitution does the actual tokenizing. Confirm the intended
# delimiter (the sls variant of this script has the same construct).
arr=($(echo $m | tr " " " "))
obsid="${arr[0]}"
detector="${arr[1]}"
msg=$obsid-$detector
echo $msg
docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT}\
-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
# NOTE(review): the sls sibling script ends its loop with `done < $obsid`;
# here the redirection is absent, so as captured the loop would read stdin.
# Verify against the committed file — this may be a capture artifact.
done
~
......@@ -33,10 +33,9 @@ jobs:
# Read-only calibration mounts plus a writable temp volume for the job container.
- ${CCDS_ROOT}:/ccds_root:ro
- ${CSST_AST_TEMP}:/pipeline/temp:rw
# command: docker run -d --network=host %ENVS% %VOLUMES% %IMAGE%
# sink_jobs:
# NOTE(review): diff capture — host slots changed from h0:1 (c0 disabled)
# to h0:10 and c0:20.
hosts:
- h0:1
# - c0:1
- h0:10
- c0:20
# - c1:1
# - c2:1
......
# NOTE(review): diff capture — base image renamed sls2d -> sls and moved to
# the private registry 10.3.10.10:8113.
FROM csst/csst-msc-l1-sls2d:latest
FROM 10.3.10.10:8113/csst/csst-msc-l1-sls
USER root
COPY run.sh /app/bin/
......@@ -6,6 +6,11 @@ RUN chmod +x /app/bin/run.sh
# Pull the scalebox agent tooling from the shared base image.
COPY --from=hub.cstcloud.cn/scalebox/base /usr/local/sbin /usr/local/sbin
RUN mkdir /work/
RUN chown -R csst:csst /work
# Added by this commit: give the non-root csst user ownership of /pipeline too.
RUN chown -R csst:csst /pipeline
WORKDIR /work/
USER csst
ENTRYPOINT ["goagent"]
IMAGE_NAME:=cnic/csst-msc-l1-sls2d
IMAGE_PATH:=/goosefsx/x-c70-y4s971cs-proxy/temp
# NOTE(review): diff capture — image renamed sls2d -> sls and the tarball
# path moved to /nfsdata/tmp; the sls2d lines above/below are the old side.
IMAGE_NAME:=cnic/csst-msc-l1-sls
IMAGE_PATH:=/nfsdata/tmp
# Build the SLS pipeline image on the host network.
build:
docker build --network=host -t $(IMAGE_NAME) .
dist: build
#docker save $(IMAGE_NAME) | zstdmt | pv | ssh c0 'zstd -d | docker load'
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls2d.tar
ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-sls2d.tar
# Save the tarball and load it on node c0.
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls.tar
ssh c0 docker load -i $(IMAGE_PATH)/msc-l1-sls.tar
push:
docker push $(IMAGE_NAME)
......@@ -15,6 +15,6 @@ run:
down:
docker stop $(IMAGE_NAME)
scp:
scp -r ./ tencent-p0:/root/csst/csst-msc-l1-sls2d/
scp -r ./ tencent-p0:/root/csst/csst-msc-l1-sls/
save:
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls2d.tar
docker save $(IMAGE_NAME) > $(IMAGE_PATH)/msc-l1-sls.tar
#!/bin/bash
# Run the SLS pipeline for the single obsid passed as $1.
# obsid 100000100 ~ 100000154
## Stress test: keep the last three digits and map the first six back to the
## original "100000" prefix format.
# test_obsid=$1
# obsid="100000"${test_obsid:0-3:3}
###
# obsid 10160000000 - 10160000136
obsid=$1
#python /L1Pipeline/build/csst_l1/app/l1_sls2d_tcc.py $obsid > /work/stdout 2> /work/stderr
# l1-sls2d --obs-id=$obsid --device=cpu --n-jobs=18 --n-jobs-gpu=18 --clean-l0 --clean-l1 --cleanup --dfs-node=pml --ver-sim=C6.2 --no-photometry --no-astrometry > /work/stdout 2> /work/stderr
# Current entry point: CPU-only run with reduced parallelism; L0/L1 scratch
# data are cleaned by the pipeline flags. The trailing options stay commented.
python /pipeline/src/run.py \
--obs-id=$obsid \
--device=cpu \
--n-jobs=12 \
--n-jobs-gpu=6 \
--clean-l0 \
--clean-l1
# --dfs-node=pml \
# --ver-sim=C6.2
# Split the task key "obsid-detector" ($1) on the hyphen into its two parts.
arr=($(echo $1 | tr "-" " "))
obsid="${arr[0]}"
detector="${arr[1]}"
echo "obsid: "$obsid
echo "detector: "$detector
cd /pipeline/output/
python /pipeline/app/run.py $obsid $detector
# Preserve the pipeline's exit status for the final `exit` below.
exit_code=$?
# NOTE(review): diff capture — the report file was renamed
# user-file.txt -> custom-out.txt and the log sources changed
# (csst-l1ppl/csst-l1mod -> pipeline/module).
echo "=====csst-l1ppl.log====" > /work/user-file.txt
cat /pipeline/output/csst-l1ppl.log >> /work/user-file.txt
echo "======csst-l1mod.log======" >> /work/user-file.txt
cat /pipeline/output/csst-l1mod.log|tail -n 100 >> /work/user-file.txt
echo "=====pipeline.log====" > /work/custom-out.txt
cat /pipeline/output/pipeline.log >> /work/custom-out.txt
echo "======module.log======" >> /work/custom-out.txt
cat /pipeline/output/module.log|tail -n 100 >> /work/custom-out.txt
timefile=/pipeline/output/timestamps.txt
# NOTE(review): the body of this `if` is cut off by the diff hunk marker below.
if test -f "$timefile"; then
......@@ -34,6 +25,6 @@ fi
# Clean scratch dirs so the next task starts fresh.
rm -rf /pipeline/input/* /pipeline/output/*
echo finish sls2d.
echo finish sls.
exit $exit_code
CLUSTER=csst
# NOTE(review): diff capture — cluster renamed csst -> csst-nao; `run` now
# uses nao.env and pins PGPORT=9090 for the scalebox CLI invocation.
CLUSTER=csst-nao
# all: reset build
run:
scalebox app create --env-file tencent.env
PGPORT=9090 scalebox app create --env-file nao.env
# Rebuild the cluster definition in docker-scalebox, then return.
reset:
cd ${HOME}/docker-scalebox/clusters/$(CLUSTER) && make && cd -
......
......@@ -8,9 +8,16 @@ PGPORT=5432
# First CLI argument: the scalebox job id the tasks are attached to.
job_id=$1
# Old (removed) generator: one task per obsid in a hard-coded range.
for m in {10160000000..10160000136}; do
echo $m
docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT} \
-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${m}')"
done
# New generator: read "obsid chipid" pairs from a text file (per the commit
# message) and insert one "obsid-chipid" key_message per line.
# NOTE(review): `obsid` holds the file path here and is reassigned per line
# below; `done < $obsid` still reads the right file because the redirection
# is evaluated once before the loop body runs — but a distinct variable name
# (e.g. msg_file) would be much clearer.
obsid="/nfsdata/share/pipeline-inttest/run-batch-msc/msg-sls-all.txt"
while IFS= read -r line
do
m="$line"
# NOTE(review): `tr " " " "` is a no-op; word splitting of the unquoted
# command substitution does the actual tokenizing.
arr=($(echo $m | tr " " " "))
obsid="${arr[0]}"
detector="${arr[1]}"
msg=$obsid-$detector
echo $msg
docker exec -t database psql -U ${POSTGRES_USER} -h ${POSTGRES_HOST} -d ${POSTGRES_DB} -p ${PGPORT}\
-c "INSERT INTO t_task(job,key_message) VALUES(${job_id},'${msg}')"
done < $obsid
~
# NOTE(review): diff capture — the app was renamed msc-l1-sls2d -> msc-l1-sls
# and moved to the csst-nao cluster; old and new lines are interleaved below.
name: msc-l1-sls2d.app.process
label: 无缝光谱2D
comment: 主巡天无缝光谱2D一级流水线
cluster: csst
name: msc-l1-sls.app.process
label: 无缝光谱
comment: 主巡天无缝光谱一级流水线
cluster: csst-nao
parameters:
initial_status: RUNNING
jobs:
msc-l1-sls2d:
label: 无缝光谱2D
base_image: cnic/csst-msc-l1-sls2d
schedule_mode: HEAD
msc-l1-sls:
label: 无缝光谱
base_image: cnic/csst-msc-l1-sls
# schedule_mode: HEAD
variables:
always_running: yes
reserved_on_exit: yes
# always_running: yes
# reserved_on_exit: yes
# Retry DFS ingestion errors (code 202) 3 times.
retry_rules: "['202:3']"
# retry_rules: "['202:3']"
output_text_size: 100000
text_tranc_mode: TAIL
locale_mode: NONE
......@@ -22,19 +22,20 @@ jobs:
parameters:
# Task key split: first 6 chars and last 3 chars become groups 1 and 2.
# NOTE(review): presumably obsid prefix + detector — confirm against the
# key_message format used by the seeding scripts.
key_group_regex: ^(.{6})(.{3})$
key_group_seq: 1,2
environments:
environments:
- CSST_DFS_API_MODE=${CSST_DFS_API_MODE}
- CSST_DFS_GATEWAY=${CSST_DFS_GATEWAY}
- CSST_DFS_APP_ID=${CSST_DFS_APP_ID}
- CSST_DFS_APP_TOKEN=${CSST_DFS_APP_TOKEN}
# NOTE(review): diff capture — CRDS_SERVER_URL replaced by CCDS_SERVER_URL,
# and the volume mounts below were renamed and made read-only.
- CRDS_SERVER_URL=${CRDS_SERVER_URL}
paths:
- ${CSST_AUX_DIR}:/pipeline/aux
- ${CSST_DFS_ROOT}:/dfsroot
- ${CSST_CRDS_ROOT}:/crdsroot
- CCDS_SERVER_URL=${CCDS_SERVER_URL}
paths:
- ${CSST_AUX_ROOT}:/pipeline/aux:ro
- ${CSST_DFS_ROOT}:/dfs_root:ro
- ${CCDS_ROOT}:/ccds_root:ro
- ${CSST_AST_TEMP}:/pipeline/temp:rw
# sink_jobs:
# hosts:
# - h0:1
# - c0:3
# Hosts enabled with larger slot counts than the commented-out defaults.
hosts:
- h0:10
- c0:20
# - c1:3
# - c2:3
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment