diff --git a/UD_any/launchBatches.py b/UD_any/launchBatches.py
index eca354092de9dfcfe43902a247c0e6e01050bd5b..e38a6b53f16cbdb56535a923776b4cc8d3cf1924 100755
--- a/UD_any/launchBatches.py
+++ b/UD_any/launchBatches.py
@@ -32,111 +32,6 @@ def addNamesAndCommandsDecode(names, commands, mode, expName) :
   commands.append("./evaluate.sh {} bin/{} --silent".format(mode, expName))
 ###############################################################################
 
-###############################################################################
-#def launchArray(names, commands, mode, jobName, device, nbHours, limit) :
-#  filename = "{}.{}.slurm".format(mode,jobName)
-#  sFile = open(filename, "w")
-#
-#  hostname = os.getenv("HOSTNAME")
-#
-#  commandList = " ".join(commands)
-#
-#  if hostname == "jean-zay1" :
-#    print("""#! /usr/bin/env bash
-#
-##SBATCH --array=0-{}%{}
-##SBATCH --job-name={}:{}
-##SBATCH --output=%A_%a.out
-##SBATCH --error=%A_%a.err
-##SBATCH --open-mode=append
-##SBATCH --ntasks=1
-##SBATCH --cpus-per-task=10
-##SBATCH --gres=gpu:1
-##SBATCH --hint=nomultithread
-##SBATCH --partition=gpu_p1
-##SBATCH --qos={}
-##SBATCH --time={}:00:00
-#
-#module purge
-#module load gcc/9.1.0
-#module load python/3.7.5
-#
-#names=({})
-#commands=({})
-#
-#newOut=${{names[$SLURM_ARRAY_TASK_ID]}}".stdout"
-#newErr=${{names[$SLURM_ARRAY_TASK_ID]}}".stderr"
-#oldOut=$SLURM_ARRAY_JOB_ID"_"$SLURM_ARRAY_TASK_ID".out"
-#oldErr=$SLURM_ARRAY_JOB_ID"_"$SLURM_ARRAY_TASK_ID".err"
-#tmpFile=$SLURM_ARRAY_JOB_ID"_"$SLURM_ARRAY_TASK_ID".tmp"
-#
-#touch $newOut
-#
-#cp $newOut $tmpFile
-#mv $oldOut $newOut
-#cat $tmpFile >> $newOut
-#
-#touch $newErr
-#
-#cp $newErr $tmpFile
-#mv $oldErr $newErr
-#cat $tmpFile >> $newErr
-#
-#rm $tmpFile
-#
-#eval "${{commands[$SLURM_ARRAY_TASK_ID]}}"
-#""".format(len(names)-1, limit, mode, jobName, "qos_gpu-t4" if nbHours > 20 else "qos_gpu-t3", nbHours, " ".join(names), " ".join(commands)), file=sFile)
-#    sFile.close()
-#  elif hostname == "sms.liscluster" :
-#    print('''#! /usr/bin/env bash
-#
-##SBATCH --array=0-{}%{}
-##SBATCH --job-name={}:{}
-##SBATCH --output=%A_%a.out
-##SBATCH --error=%A_%a.err
-##SBATCH --open-mode=append
-##SBATCH --ntasks=1
-##SBATCH --cpus-per-task=1
-##SBATCH --hint=nomultithread
-##SBATCH --partition={}
-##SBATCH --exclude=sensei1,lifnode1,asfalda1
-##SBATCH --time={}:00:00
-#
-#module purge
-#
-#names=({})
-#commands=({})
-#
-#newOut=${{names[$SLURM_ARRAY_TASK_ID]}}".stdout"
-#newErr=${{names[$SLURM_ARRAY_TASK_ID]}}".stderr"
-#oldOut=$SLURM_ARRAY_JOB_ID"_"$SLURM_ARRAY_TASK_ID".out"
-#oldErr=$SLURM_ARRAY_JOB_ID"_"$SLURM_ARRAY_TASK_ID".err"
-#tmpFile=$SLURM_ARRAY_JOB_ID"_"$SLURM_ARRAY_TASK_ID".tmp"
-#
-#touch $newOut
-#
-#cp $newOut $tmpFile
-#mv $oldOut $newOut
-#cat $tmpFile >> $newOut
-#
-#touch $newErr
-#
-#cp $newErr $tmpFile
-#mv $oldErr $newErr
-#cat $tmpFile >> $newErr
-#
-#rm $tmpFile
-#
-#eval "${{commands[$SLURM_ARRAY_TASK_ID]}}"
-#'''.format(len(names)-1, limit, mode, jobName, "cpu" if device == "cpu" else "gpu\n#SBATCH --gres=gpu", nbHours, " ".join(names), commandList), file=sFile)
-#    sFile.close()
-#  else :
-#    print("ERROR : Unknown hostname \'%s\'"%hostname)
-#    exit(1)
-#
-#  subprocess.Popen("sbatch {}".format(filename), shell=True).wait()
-###############################################################################
-
 ###############################################################################
 if __name__ == "__main__" :