KnowHow

A collection of mostly technical notes. They can be looked up via search.

Sample script that writes SLURM environment variables to a file

Registered: 2024/05/21 23:06
Category: SLURM
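The job script below requests two nodes and two tasks, appends the main SLURM environment variables to env.txt, and then launches an MPI job. The partition directive is deliberately disabled: Slurm only honors lines that begin with exactly #SBATCH, so the double-hash ##SBATCH line is treated as an ordinary comment.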

#!/bin/bash
##SBATCH --partition=cls3
#SBATCH --nodes=2
#SBATCH --ntasks=2
#SBATCH -J test
#SBATCH -o stdout.%J
#SBATCH -e stderr.%J


#for var in $(printenv | grep '^SLURM_' | cut -d= -f1); do
#  echo "echo $"$var >> environment_val
#done
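# The commented loop above can be used to generate the echo lines below:
# it writes an 'echo $VAR' line for every SLURM_* variable into
# environment_val, which can then be pasted into this script.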

echo SLURM_NODEID $SLURM_NODEID >> env.txt
echo SLURM_TASK_PID $SLURM_TASK_PID >> env.txt
echo SLURM_PRIO_PROCESS $SLURM_PRIO_PROCESS >> env.txt
echo SLURM_SUBMIT_DIR $SLURM_SUBMIT_DIR >> env.txt
echo SLURM_PROCID $SLURM_PROCID >> env.txt
echo SLURM_JOB_GID $SLURM_JOB_GID >> env.txt
echo SLURM_TASKS_PER_NODE $SLURM_TASKS_PER_NODE >> env.txt
echo SLURM_NNODES $SLURM_NNODES >> env.txt
echo SLURM_JOB_NODELIST $SLURM_JOB_NODELIST >> env.txt
echo SLURM_CLUSTER_NAME $SLURM_CLUSTER_NAME >> env.txt
echo SLURM_NODELIST $SLURM_NODELIST >> env.txt
echo SLURM_NTASKS $SLURM_NTASKS >> env.txt
echo SLURM_JOB_CPUS_PER_NODE $SLURM_JOB_CPUS_PER_NODE >> env.txt
echo SLURM_TOPOLOGY_ADDR $SLURM_TOPOLOGY_ADDR >> env.txt
echo SLURM_WORKING_CLUSTER $SLURM_WORKING_CLUSTER >> env.txt
echo SLURM_JOB_NAME $SLURM_JOB_NAME >> env.txt
echo SLURM_JOBID $SLURM_JOBID >> env.txt
echo SLURM_CONF $SLURM_CONF >> env.txt
echo SLURM_NODE_ALIASES $SLURM_NODE_ALIASES >> env.txt
echo SLURM_JOB_QOS $SLURM_JOB_QOS >> env.txt
echo SLURM_TOPOLOGY_ADDR_PATTERN $SLURM_TOPOLOGY_ADDR_PATTERN >> env.txt
echo SLURM_CPUS_ON_NODE $SLURM_CPUS_ON_NODE >> env.txt
echo SLURM_JOB_NUM_NODES $SLURM_JOB_NUM_NODES >> env.txt
echo SLURM_JOB_UID $SLURM_JOB_UID >> env.txt
echo SLURM_JOB_PARTITION $SLURM_JOB_PARTITION >> env.txt
echo SLURM_JOB_USER $SLURM_JOB_USER >> env.txt
echo SLURM_NPROCS $SLURM_NPROCS >> env.txt
echo SLURM_SUBMIT_HOST $SLURM_SUBMIT_HOST >> env.txt
echo SLURM_GTIDS $SLURM_GTIDS >> env.txt
echo SLURM_JOB_ID $SLURM_JOB_ID >> env.txt
echo SLURM_LOCALID $SLURM_LOCALID >> env.txt

#python3 job.py
mpiexec -n ${SLURM_NTASKS} python3 job.py
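
A minimal sketch of submitting the job, assuming the script above is saved as job.sh (the filename is an assumption). sbatch prints the assigned job ID, and squeue shows the job while it runs:

[user01@headnode work]$ sbatch job.sh
Submitted batch job 22
[user01@headnode work]$ squeue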

The test Python script

[user01@headnode work]$ cat job.py
#!/usr/bin/python3

import time

print("Hello World!")

for i in range(0, 10):
    time.sleep(1)
    print(i, "sec")

f = open('out.log', 'w')
f.write('Finish job')
f.close()

[user01@headnode work]$
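
The script prints a progress line once a second for ten seconds and then writes out.log as a simple completion marker in the working directory.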

Output (contents of env.txt)

SLURM_NODEID 0
SLURM_TASK_PID 9618
SLURM_PRIO_PROCESS 0
SLURM_SUBMIT_DIR /home/user01/work
SLURM_PROCID 0
SLURM_JOB_GID 1000
SLURM_TASKS_PER_NODE 1(x2)
SLURM_NNODES 2
SLURM_JOB_NODELIST node[1-2]
SLURM_CLUSTER_NAME rockylinux8
SLURM_NODELIST node[1-2]
SLURM_NTASKS 2
SLURM_JOB_CPUS_PER_NODE 1(x2)
SLURM_TOPOLOGY_ADDR node1
SLURM_WORKING_CLUSTER rockylinux8:headnode:7817:9472:109
SLURM_JOB_NAME test
SLURM_JOBID 22
SLURM_CONF /etc/slurm/slurm.conf
SLURM_NODE_ALIASES (null)
SLURM_JOB_QOS normal
SLURM_TOPOLOGY_ADDR_PATTERN node
SLURM_CPUS_ON_NODE 1
SLURM_JOB_NUM_NODES 2
SLURM_JOB_UID 1000
SLURM_JOB_PARTITION part2
SLURM_JOB_USER nobuyuki
SLURM_NPROCS 2
SLURM_SUBMIT_HOST headnode
SLURM_GTIDS 0
SLURM_JOB_ID 22
SLURM_LOCALID 0
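
SLURM_TASKS_PER_NODE and SLURM_JOB_CPUS_PER_NODE use Slurm's compact notation: 1(x2) means the value 1 repeated for each of the 2 nodes. The batch script itself runs only on the first node of the allocation, which is why per-process values such as SLURM_NODEID, SLURM_PROCID, and SLURM_LOCALID are all 0.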

Appendix
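
The same job script, except that each line writes only the variable's value, without the variable name: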

#!/bin/bash
##SBATCH --partition=cls3
#SBATCH --nodes=2
#SBATCH --ntasks=2
#SBATCH -J test
#SBATCH -o stdout.%J
#SBATCH -e stderr.%J


#for var in $(printenv | grep '^SLURM_' | cut -d= -f1); do
#  echo "echo $"$var >> environment_val
#done

echo $SLURM_NODEID >> env.txt
echo $SLURM_TASK_PID >> env.txt
echo $SLURM_PRIO_PROCESS >> env.txt
echo $SLURM_SUBMIT_DIR >> env.txt
echo $SLURM_PROCID >> env.txt
echo $SLURM_JOB_GID >> env.txt
echo $SLURM_TASKS_PER_NODE >> env.txt
echo $SLURM_NNODES >> env.txt
echo $SLURM_JOB_NODELIST >> env.txt
echo $SLURM_CLUSTER_NAME >> env.txt
echo $SLURM_NODELIST >> env.txt
echo $SLURM_NTASKS >> env.txt
echo $SLURM_JOB_CPUS_PER_NODE >> env.txt
echo $SLURM_TOPOLOGY_ADDR >> env.txt
echo $SLURM_WORKING_CLUSTER >> env.txt
echo $SLURM_JOB_NAME >> env.txt
echo $SLURM_JOBID >> env.txt
echo $SLURM_CONF >> env.txt
echo $SLURM_NODE_ALIASES >> env.txt
echo $SLURM_JOB_QOS >> env.txt
echo $SLURM_TOPOLOGY_ADDR_PATTERN >> env.txt
echo $SLURM_CPUS_ON_NODE >> env.txt
echo $SLURM_JOB_NUM_NODES >> env.txt
echo $SLURM_JOB_UID >> env.txt
echo $SLURM_JOB_PARTITION >> env.txt
echo $SLURM_JOB_USER >> env.txt
echo $SLURM_NPROCS >> env.txt
echo $SLURM_SUBMIT_HOST >> env.txt
echo $SLURM_GTIDS >> env.txt
echo $SLURM_JOB_ID >> env.txt
echo $SLURM_LOCALID >> env.txt

#python3 job.py
mpiexec -n ${SLURM_NTASKS} python3 job.py
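
As a more compact alternative (a sketch, not part of the original scripts), the whole block of echo lines can be replaced with a single pipeline that writes every SLURM_* variable together with its name:

printenv | grep '^SLURM_' >> env.txt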