Commit c5c50cbd authored by root's avatar root

July 2020 update

parent 0026b2b1
This diff is collapsed.
#!/bin/bash
# Slurm job script: run a serial ABAQUS analysis ("Door") on the old module tree.
#SBATCH --ntasks=1
#SBATCH --time=0:05:00
# Request 5 abaqus license tokens. sbatch long options are lower-case and
# gres syntax is "name:count" -- the original "--GRES=abaqus+5" is rejected.
#SBATCH --gres=abaqus:5
# Switch to the old (pre-2019) module tree and load ABAQUS.
module purge
source /usr/local/module/spartan_old.sh
module load ABAQUS/6.14.2-linux-x86_64
# Run the job 'Door'
abaqus job=Door
#!/bin/bash
# Slurm job script: serial ABINIT test run (tbase1 tutorial input).
#SBATCH --partition=physical
#SBATCH --time=1:00:00
# Switch to the old (pre-2019) module tree and load ABINIT.
module purge
source /usr/local/module/spartan_old.sh
module load ABINIT/8.0.8b-intel-2016.u3
# ">&" (bash) redirects both stdout and stderr to "log".
abinit < tbase1_x.files >& log
#!/bin/bash
# Slurm job script: ABRicate resistance-gene screen, old (2015) module tree.
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=2015ABRicate-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# NOTE(review): the two directives below are disabled (space after '#');
# "youreamiladdress" looks like a typo for "youremailaddress".
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load ABRicate/0.8.7-spartan_intel-2017.u2
# The command to actually run the job
abricate ecoli_rel606.fasta
#!/bin/bash
# Slurm job script: ABRicate resistance-gene screen, spartan_2019 module tree.
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=ABRicate-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load abricate/0.9.9-perl-5.30.0
# The command to actually run the job
abricate ecoli_rel606.fasta
#!/bin/bash
# Slurm job script: ABySS assembly of a small synthetic data set (2015 tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=ABySS-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
# The goolf-2015a toolchain module lives in the old module tree; every other
# 2015-era script here sources spartan_old.sh before loading from it.
source /usr/local/module/spartan_old.sh
module load ABySS/2.0.2-goolf-2015a
# Assemble a small synthetic data set
tar xzvf test-data.tar.gz
sleep 20
abyss-pe k=25 name=test in='test-data/reads1.fastq test-data/reads2.fastq'
# Calculate assembly contiguity statistics
abyss-fac test-unitigs.fa
#!/bin/bash
# Slurm job script: ABySS assembly of a small synthetic data set (2019 tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=ABySS-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load abyss/2.1.5
# Assemble a small synthetic data set
tar xzvf test-data.tar.gz
sleep 20
abyss-pe k=25 name=test in='test-data/reads1.fastq test-data/reads2.fastq'
# Calculate assembly contiguity statistics
abyss-fac test-unitigs.fa
#!/bin/bash
# Slurm job script: ADMIXTURE ancestry estimation on the HapMap3 sample data.
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=2015ADMIXTURE-test.slurm
# Run with two threads
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
# Start from a clean module environment, as every other script here does.
# NOTE(review): the other 2015-era scripts also source spartan_old.sh first;
# confirm which module tree provides ADMIXTURE/1.3.0 before adding that.
module purge
module load ADMIXTURE/1.3.0
# Untar sample files, run application
# See admixture --help for options.
tar xvf hapmap3-files.tar.gz
# -j2 matches the two CPUs requested above.
admixture -j2 hapmap3.bed 1
#!/bin/bash
# Slurm job script: ADMIXTURE ancestry estimation (spartan_2019 module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=2019ADMIXTURE-test.slurm
# Run with two threads
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load admixture/1.3.0
# Untar sample files, run application
# See admixture --help for options.
tar xvf hapmap3-files.tar.gz
# -j2 matches the two CPUs requested above.
admixture -j2 hapmap3.bed 1
#!/bin/bash
# Slurm job script: AFNI neuroimaging analysis example (old module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=2015AFNI-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load AFNI/linux_openmp_64-spartan_intel-2017.u2-20190219
# Untar dataset and run script
tar xvf ARzs_data.tgz
# "@ARzs_analyze" is an AFNI-style analysis script shipped with the dataset.
./@ARzs_analyze
#!/bin/bash
# Slurm job script: AFNI neuroimaging analysis example (spartan_2019 tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=AFNI-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load afni/18.3.00-python-3.7.4
# Untar dataset and run script
tar xvf ARzs_data.tgz
./@ARzs_analyze
This is incomplete; still getting the tgz files organised. LL20200707
#!/bin/bash
# Slurm job script: ANSYS CFD batch run of the OscillatingPlate example.
#SBATCH --job-name="2015ANSYStest"
# Note this order has to be kept. It's horrible, but it works.
module purge
source /usr/local/module/spartan_old.sh
module load X11/20190311-spartan_gcc-6.2.0
module load motif/2.3.5-goolf-2015a
module load libXpm/3.5.11-goolf-2015a
module load ANSYS_CFD/19.0
# "-b" runs ANSYS in batch mode, reading commands from the .inp file.
ansys190 -b < OscillatingPlate.inp > OscillatingPlate.db
#!/bin/bash
# Slurm job script: ARAGORN tRNA/tmRNA detection on a sample FASTA file.
# Job name and partition
#SBATCH --job-name=ARAGORN-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load ARAGORN/1.2.36-GCC-4.9.2
# Run the application
aragorn -o results sample.fa
#!/bin/bash
# Slurm job script: Amber pmemd (CUDA + MPI, double precision) on the GPU partition.
# Add your project account details here.
# SBATCH --account=XXXX
#SBATCH --partition=gpgpu
#SBATCH --ntasks=4
#SBATCH --time=1:00:00
module purge
source /usr/local/module/spartan_old.sh
module load Amber/16-gompi-2017b-CUDA-mpi
# Launch one MPI rank per task; the binary is addressed by its full install path.
srun /usr/local/easybuild/software/Amber/16-gompi-2017b-CUDA-mpi/amber16/bin/pmemd.cuda_DPFP.MPI -O -i mdin -o mdout -inf mdinfo -x mdcrd -r restrt
Array job indices can be specified in a number of ways.
A job array with index values between 0 and 31:
#SBATCH --array=0-31
A job array with index values of 1, 2, 5, 19, 27:
#SBATCH --array=1,2,5,19,27
A job array with index values between 1 and 7 with a step size of 2 (i.e. 1, 3, 5, 7):
#SBATCH --array=1-7:2
As with all Slurm directives, the SBATCH command can be applied within the batch script or on the command line.
To convert a directory of files to include an array ID see the following example:
$ touch aaa.fastq.gz bbb.fastq.gz ccc.fastq.gz ddd.fastq.gz
$ ls
aaa.fastq.gz bbb.fastq.gz ccc.fastq.gz ddd.fastq.gz
$ n=1; for f in *fastq.gz; do mv "$f" "$((n++))$f"; done
$ ls
1aaa.fastq.gz 2bbb.fastq.gz 3ccc.fastq.gz 4ddd.fastq.gz
See also the Octave array example in /usr/local/common/Octave.
#!/bin/bash
# Slurm array-job script: each of the 5 array tasks creates a directory
# named after its own array index.
#SBATCH --job-name="file-array"
#SBATCH --ntasks=1
#SBATCH --time=0-00:15:00
#SBATCH --array=1-5
# Note: SLURM defaults to running jobs in the directory
# where they are submitted, no need for $PBS_O_WORKDIR
# Quote the expansion, and use -p so a requeued task does not fail
# if its directory already exists.
mkdir -p "${SLURM_ARRAY_TASK_ID}"
#!/bin/bash
# Slurm job script: BAMM diversification analysis (old module tree).
# Job name and partition
#SBATCH --job-name=2015BAMM-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Speciation-extinction analyses
# You must have an ultrametric phylogenetic tree.
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load BAMM/2.5.0-spartan_intel-2017.u2
# Example from: `http://bamm-project.org/quickstart.html`
# To run bamm you must specify a control file.
# The following is for diversification.
# You may wish to use traits instead
# bamm -c template_trait.txt
bamm -c template_diversification.txt
#!/bin/bash
# Slurm job script: BAMM diversification analysis (spartan_2019 module tree).
# Job name and partition
#SBATCH --job-name=BAMM-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Speciation-extinction analyses
# You must have an ultrametric phylogenetic tree.
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load bamm/2.5.0
# Example from: `http://bamm-project.org/quickstart.html`
# To run bamm you must specify a control file.
# The following is for diversification.
# You may wish to use traits instead
# bamm -c template_trait.txt
bamm -c template_diversification.txt
#!/bin/bash
# Slurm job script: BBMap reformat.sh on a sample FASTQ file (old module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=2015BBMap-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load BBMap/36.62-intel-2016.u3-Java-1.8.0_71
# See examples at:
# http://seqanswers.com/forums/showthread.php?t=58221
reformat.sh in=sample1.fq out=processed.fq
#!/bin/bash
# Slurm job script: BBMap reformat.sh on a sample FASTQ file (2019 module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BBMap-test.slurm
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load bbmap/38.76
# See examples at:
# http://seqanswers.com/forums/showthread.php?t=58221
reformat.sh in=sample1.fq out=processed.fq
#!/bin/bash
# Slurm job script: run the BEDTools bundled test suite (old module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BEDTools-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load BEDTools/2.28.0-spartan_intel-2017.u2
# BEDTools has an extensive test suite; but the tests assume the wrong
# location for the application!
# So all these tests need to be modified to include:
# BT=$(which bedtools)
# Use the EBROOT* variable set by the EasyBuild module instead of
# hard-coding a 2.27.1 path that does not match the 2.28.0 module above.
cp -r "${EBROOTBEDTOOLS}/test/"* .
find ./ -type f -exec sed -i -e 's/${BT-..\/..\/bin\/bedtools}/$(which bedtools)/g' {} \;
sh test.sh
# Specific example commands available here:
# https://bedtools.readthedocs.io/en/latest/content/example-usage.html#bedtools-intersect
#!/bin/bash
# Slurm job script: run the BEDTools bundled test suite (spartan_2019 tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BEDTools-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load bedtools/2.27.1
# BEDTools has an extensive test suite; but the tests assume the wrong
# location for the application!
# So all these tests need to be modified to include:
# BT=$(which bedtools)
# Use the EBROOT* variable set by the EasyBuild module instead of the
# hard-coded old-stack (intel-2017) install path, which does not belong
# to the 2019 module loaded above.
cp -r "${EBROOTBEDTOOLS}/test/"* .
find ./ -type f -exec sed -i -e 's/${BT-..\/..\/bin\/bedtools}/$(which bedtools)/g' {} \;
sh test.sh
# Specific example commands available here:
# https://bedtools.readthedocs.io/en/latest/content/example-usage.html#bedtools-intersect
#!/bin/bash
# Slurm job script: legacy blastall nucleotide search (old module tree).
# Set the number of processors that will be used.
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
# Set the walltime (5 hrs)
#SBATCH --time=5:00:00
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load BLAST/2.2.26-Linux_x86_64
# Run the job
# "-a 8" (threads) matches the 8 tasks requested above.
blastall -i ./rat-ests/rn_est -d ./dbs/rat.1.rna.fna -p blastn -e 0.05 -v 5 -b 5 -T F -m 9 -o rat_blast_tab.txt -a 8
#!/bin/bash
# Slurm job script: legacy blastall nucleotide search (spartan_2019 tree).
# Set the number of processors that will be used.
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
# Set the walltime (5 hrs)
#SBATCH --time=5:00:00
# Load the environment variables
module purge
module load spartan_2019
module load blast/2.10.0-linux_x86_64
# Run the job
# "-a 8" (threads) matches the 8 tasks requested above.
blastall -i ./rat-ests/rn_est -d ./dbs/rat.1.rna.fna -p blastn -e 0.05 -v 5 -b 5 -T F -m 9 -o rat_blast_tab.txt -a 8
#!/bin/bash
# Slurm job script: BWA/SAMtools variant-calling walkthrough (old module tree).
# Downloads a reference genome and trimmed reads, aligns, converts and sorts.
#SBATCH --time=2:00:00
# Set up environment PATH
module purge
source /usr/local/module/spartan_old.sh
module load BWA/0.7.17-intel-2017.u2
module load SAMtools/1.9-intel-2017.u2
# web_proxy is needed for outbound downloads from compute nodes.
module load web_proxy
# Get source files
mkdir -p data/ref_genome
curl -L -o data/ref_genome/ecoli_rel606.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/017/985/GCA_000017985.1_ASM1798v1/GCA_000017985.1_ASM1798v1_genomic.fna.gz
sleep 30
gunzip data/ref_genome/ecoli_rel606.fasta.gz
curl -L -o sub.tar.gz https://downloader.figshare.com/files/14418248
sleep 60
tar xvf sub.tar.gz
mv sub/ data/trimmed_fastq_small
mkdir -p results/sam results/bam results/bcf results/vcf
# Run commands
bwa index data/ref_genome/ecoli_rel606.fasta
bwa mem data/ref_genome/ecoli_rel606.fasta data/trimmed_fastq_small/SRR2584866_1.trim.sub.fastq data/trimmed_fastq_small/SRR2584866_2.trim.sub.fastq > results/sam/SRR2584866.aligned.sam
samtools view -S -b results/sam/SRR2584866.aligned.sam > results/bam/SRR2584866.aligned.bam
samtools sort -o results/bam/SRR2584866.aligned.sorted.bam results/bam/SRR2584866.aligned.bam
#!/bin/bash
# Slurm job script: BWA/SAMtools variant-calling walkthrough (spartan_2019 tree).
# Downloads a reference genome and trimmed reads, aligns, converts and sorts.
#SBATCH --time=2:00:00
# Set up environment PATH
module purge
module load spartan_2019
module load foss/2019b
module load bwa/0.7.17
module load samtools/1.9
# web_proxy is needed for outbound downloads from compute nodes.
module load web_proxy
# Get source files
mkdir -p data/ref_genome
curl -L -o data/ref_genome/ecoli_rel606.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/017/985/GCA_000017985.1_ASM1798v1/GCA_000017985.1_ASM1798v1_genomic.fna.gz
sleep 30
gunzip data/ref_genome/ecoli_rel606.fasta.gz
curl -L -o sub.tar.gz https://downloader.figshare.com/files/14418248
sleep 60
tar xvf sub.tar.gz
mv sub/ data/trimmed_fastq_small
mkdir -p results/sam results/bam results/bcf results/vcf
# Run commands
bwa index data/ref_genome/ecoli_rel606.fasta
bwa mem data/ref_genome/ecoli_rel606.fasta data/trimmed_fastq_small/SRR2584866_1.trim.sub.fastq data/trimmed_fastq_small/SRR2584866_2.trim.sub.fastq > results/sam/SRR2584866.aligned.sam
samtools view -S -b results/sam/SRR2584866.aligned.sam > results/bam/SRR2584866.aligned.bam
samtools sort -o results/bam/SRR2584866.aligned.sorted.bam results/bam/SRR2584866.aligned.bam
#!/bin/bash
# Slurm job script: BEAST Bayesian phylogenetics test run (old module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BEAST-test.slurm
# Run on 4 cores
#SBATCH --ntasks=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load Beast/2.3.1-intel-2016.u3
beast testRNA.xml
#!/bin/bash
# Slurm job script: BEAST Bayesian phylogenetics test run (spartan_2019 tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BEAST-test.slurm
# Run on 4 cores
#SBATCH --ntasks=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load beast/2.5.1
beast testRNA.xml
This diff is collapsed.
#!/bin/bash
# Slurm job script: COMSOL batch solve of mymodel.mph on one 8-core node.
#SBATCH --time=24:00:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
# You might need an external license file
# export LM_LICENSE_FILE=port@licenseserver
module purge
source /usr/local/module/spartan_new.sh
module load COMSOL/5.2
# Example batch command from csiro.org.au
# "-np 8" matches the 8 tasks requested above.
comsol batch -inputfile mymodel.mph -outputfile mymodelresult.mph -batchlog mybatch.log -j b1 -np 8 -mpmode owner
#!/bin/bash
# Slurm job script: CPMD two-rank MPI test (old module tree).
# Name and Partition
#SBATCH --job-name=CPMD-test.slurm
# Run on two cores
#SBATCH --ntasks=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load CPMD/4.3-intel-2018.u4
# Example taken from Axek Kohlmeyer's classic tutorial
# http://www.theochem.ruhr-uni-bochum.de/~legacy.akohlmey/cpmd-tutor/index.html
# "srun -n 2" matches the two tasks requested above.
srun -n 2 cpmd.x 1-h2-wave.inp > 1-h2-wave.out
#!/bin/bash
# Slurm job script: CPMD four-rank MPI test (spartan_2019 module tree).
# Name and Partition
#SBATCH --job-name=CPMD-test.slurm
# Run on four cores
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load cpmd/4.3
# Example taken from Axek Kohlmeyer's classic tutorial
# http://www.theochem.ruhr-uni-bochum.de/~legacy.akohlmey/cpmd-tutor/index.html
# "srun -n 4" matches the four tasks requested above.
srun -n 4 cpmd.x 1-h2-wave.inp > 1-h2-wave.out
#!/bin/bash
# Slurm job script: Cufflinks transcript assembly of sample.bam (old tree).
#SBATCH --job-name=Cufflinks-test.slurm
# Multicore
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 1:00:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load Cufflinks/2.2.1-GCC-4.9.2
# Set the Cufflinks environment: write output into the submission directory.
CUFFLINKS_OUTPUT="${PWD}"
# Quote the expansions so the command is robust to spaces in the path.
cufflinks --quiet --num-threads "$SLURM_NTASKS" --output-dir "$CUFFLINKS_OUTPUT" sample.bam
#!/bin/bash
# Slurm job script: Cufflinks transcript assembly of sample.bam (2019 tree).
#SBATCH --job-name=Cufflinks-test.slurm
# Multicore
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 1:00:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load cufflinks/20190706
# Set the Cufflinks environment: write output into the submission directory.
CUFFLINKS_OUTPUT="${PWD}"
# Quote the expansions so the command is robust to spaces in the path.
cufflinks --quiet --num-threads "$SLURM_NTASKS" --output-dir "$CUFFLINKS_OUTPUT" sample.bam
#!/bin/bash
# Slurm job script: FDS fire-dynamics simulation of the pplume5 case.
#SBATCH --job-name FDS_example_job
#How many nodes/cores? FDS is MPI enabled and can operate across multiple nodes
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#What is the maximum time this job is expected to take? (Walltime)
#Format: Days-Hours:Minutes:Seconds
# "1-24:00:00" is not a valid days-hours time (hours must be < 24);
# 1 day + 24 hours was presumably intended as 2 days.
#SBATCH --time=2-00:00:00
module purge
# "source" was missing: executing the file directly cannot change this
# shell's module environment.
source /usr/local/module/spartan_old.sh
module load FDS/6.7.0-spartan_intel-2017.u2
fds pplume5.fds
!!! General configuration
&HEAD CHID='pplume5', TITLE='Plume case' /
name of the case and a brief explanation
&TIME T_END=10.0 /
the simulation will end at 10 seconds
&MISC SURF_DEFAULT='wall', TMPA=25. /
all bounding surfaces have
a 'wall' boundary condition
unless otherwise specified,
the ambient temperature is set to 25°C.
&REAC ID='polyurethane', SOOT_YIELD=0.10,
N=1.0, C=6.3, H=7.1, O=2.1 /
predominant fuel gas for the mixture fraction model
of gas phase combustion
!!! Computational domain
&MESH IJK=32,32,16, XB=0.0,1.6,0.0,1.6,0.0,0.8 /
&MESH IJK=32,32,16, XB=0.0,1.6,0.0,1.6,0.8,1.6 /
&MESH IJK=32,32,16, XB=0.0,1.6,0.0,1.6,1.6,2.4 /
&MESH IJK=32,32,16, XB=0.0,1.6,0.0,1.6,2.4,3.2 /
four connected calculation meshes
and their cell numbers
!!! Properties
&MATL ID='gypsum_plaster', CONDUCTIVITY=0.48,
SPECIFIC_HEAT=0.84, DENSITY=1440. /
thermophysical properties of 'gypsum plaster' material
&PART ID='tracers', MASSLESS=.TRUE., SAMPLING_FACTOR=1 /
a type of Lagrangian particles
&SURF ID='burner', HRRPUA=600.,
PART_ID='tracers', COLOR='RASPBERRY' /
a type of boundary conditions named 'burner'
&SURF ID='wall', RGB=200,200,200, MATL_ID='gypsum_plaster',
THICKNESS=0.012 /
a type of boundary conditions named 'wall'
!!! Solid geometry
&VENT XB=0.5,1.1,0.5,1.1,0.1,0.1, SURF_ID='burner' /
the 'burner' boundary condition
is imposed to a plane face
&OBST XB=0.5,1.1,0.5,1.1,0.0,0.1, SURF_ID='wall' /
a solid is created, 'wall' boundary condition
is imposed to all its faces
&VENT XB=0.0,0.0,0.0,1.6,0.0,3.2, SURF_ID='OPEN'/
&VENT XB=1.6,1.6,0.0,1.6,0.0,3.2, SURF_ID='OPEN'/
&VENT XB=0.0,1.6,0.0,0.0,0.0,3.2, SURF_ID='OPEN'/
&VENT XB=0.0,1.6,1.6,1.6,0.0,3.2, SURF_ID='OPEN'/
&VENT XB=0.0,1.6,0.0,1.6,3.2,3.2, SURF_ID='OPEN'/
the 'OPEN' boundary condition is imposed to
the exterior boundaries of the computational domain
!!! Output
&DEVC XYZ=1.2,1.2,2.9, QUANTITY='THERMOCOUPLE', ID='tc1' /
send to output: the data collected by a thermocouple
&ISOF QUANTITY='TEMPERATURE', VALUE(1)=100.0 /
3D contours of temperature at 100°C
&SLCF PBX=0.8, QUANTITY='TEMPERATURE', VECTOR=.TRUE. /
vector slices colored by temperature
&BNDF QUANTITY='WALL TEMPERATURE' /
surface 'WALL_TEMPERATURE' at all solid obstructions
&TAIL / end of file
#!/bin/bash
# Slurm job script: compile and run a small FFTW example (old module tree).
# Name and partition
#SBATCH --job-name=FFTW-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load FFTW/3.3.6-gompi-2017b
# Compile and execute.
# Libraries must follow the source/object files on the command line,
# or the linker resolves no symbols from -lfftw3 and the link fails.
g++ fftw_example.c -o fftw_example -lfftw3
./fftw_example > results.txt
# Example from : https://github.com/undees/fftw-example
#!/bin/bash
# Slurm job script: compile and run a small FFTW example (spartan_2019 tree).
# Name and partition
#SBATCH --job-name=FFTW-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load fftw/3.3.8
# Compile and execute.
# Libraries must follow the source/object files on the command line,
# or the linker resolves no symbols from -lfftw3 and the link fails.
g++ fftw_example.c -o fftw_example -lfftw3
./fftw_example > results.txt
# Example from : https://github.com/undees/fftw-example
#!/bin/bash
# Slurm job script: FSL brain-extraction (bet) test on the old module tree.
#SBATCH -t 0:30:00
module purge
# "source" was missing: executing the file directly cannot change this
# shell's module environment.
source /usr/local/module/spartan_old.sh
module load FSL/5.0.9-centos6_64
# FSL needs to be sourced
source "$FSLDIR/etc/fslconf/fsl.sh"
time srun bet /usr/local/common/FSL/intro/structural.nii.gz test1FSL -f 0.1
#!/bin/bash
# Slurm job script: FSL brain-extraction (bet) test, multi-threaded request.
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
# "0:00:05" was 5 seconds of walltime, which cannot complete a bet run;
# the sibling FSL scripts use 30 minutes.
#SBATCH -t 0:30:00
module purge
# "source" was missing: executing the file directly cannot change this
# shell's module environment.
# NOTE(review): spartan_new.sh is used with an old-style module name
# (FSL/5.0.9-centos6_64) -- confirm this module exists in the new tree.
source /usr/local/module/spartan_new.sh
module load FSL/5.0.9-centos6_64
# FSL needs to be sourced
source "$FSLDIR/etc/fslconf/fsl.sh"
time bet /usr/local/common/FSL/intro/structural.nii.gz test1FSL -f 0.1
#!/bin/bash
# Slurm job script: FSL brain-extraction (bet) test (spartan_2019 module tree).
#SBATCH -t 0:30:00
module purge
module load spartan_2019
module load foss/2019b
module load fsl/6.0.1-python-3.7.4
# FSL needs to be sourced
source $FSLDIR/etc/fslconf/fsl.sh
time srun bet /usr/local/common/FSL/intro/structural.nii.gz test1FSL -f 0.1
#!/bin/bash
# Slurm job script: FSL bet test with 8 CPUs per task (spartan_2019 tree).
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH -t 0:30:00
module purge
module load spartan_2019
module load foss/2019b
module load fsl/6.0.1-python-3.7.4
# FSL needs to be sourced
source $FSLDIR/etc/fslconf/fsl.sh
time srun bet /usr/local/common/FSL/intro/structural.nii.gz test1FSL -f 0.1
#!/bin/bash
# Slurm job script: FreeSurfer group-analysis GLM tutorial (old module tree).
#SBATCH -t 00:15:00
module purge
# "source" was missing: executing the file directly cannot change this
# shell's module environment.
source /usr/local/module/spartan_old.sh
module load FreeSurfer/5.3.0-GCC-4.9.2-centos6_x86_64
# Tutorial derived from `https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/GroupAnalysis`
# Export directories
export TUTORIAL_DATA=/usr/local/common/FreeSurfer
export SUBJECTS_DIR=$TUTORIAL_DATA/buckner_data/tutorial_subjs/group_analysis_tutorial
# Create contrast; test the change in thickness with age, after removing the effects of gender.
echo "0 0 0.5 0.5" > lh-Avg-thickness-age-Cor.mtx
# Assemble the precached Data (mris_preproc)
mris_preproc --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd --cache-in thickness.fwhm10.fsaverage --target fsaverage --hemi lh --out lh.gender_age.thickness.10.mgh
# Assemble the data (non-cached)
# mris_preproc --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd --target fsaverage --hemi lh --meas thickness --out $SUBJECTS_DIR/glm/lh.gender_age.thickness.00.mg
# mri_surf2surf --hemi lh --s fsaverage --sval lh.gender_age.thickness.00.mgh --fwhm 10 --cortex --tval lh.gender_age.thickness.10B.mgh
mri_glmfit --y lh.gender_age.thickness.10.mgh --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd dods --C $SUBJECTS_DIR/glm/lh-Avg-thickness-age-Cor.mtx --surf fsaverage lh --cortex --glmdir lh.gender_age.glmdir
#!/bin/bash
# Slurm job script: FreeSurfer group-analysis GLM tutorial (spartan_2019 tree).
#SBATCH -t 01:00:00
module purge
module load spartan_2019
module load freesurfer/7.1.0-centos7_x86_64
# Tutorial derived from `https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/GroupAnalysis`
# Export directories
export TUTORIAL_DATA=/usr/local/common/FreeSurfer
export SUBJECTS_DIR=$TUTORIAL_DATA/buckner_data/tutorial_subjs/group_analysis_tutorial
# Create contrast; test the change in thickness with age, after removing the effects of gender.
echo "0 0 0.5 0.5" > lh-Avg-thickness-age-Cor.mtx
# Assemble the precached Data (mris_preproc)
mris_preproc --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd --cache-in thickness.fwhm10.fsaverage --target fsaverage --hemi lh --out lh.gender_age.thickness.10.mgh
# Assemble the data (non-cached)
# mris_preproc --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd --target fsaverage --hemi lh --meas thickness --out $SUBJECTS_DIR/glm/lh.gender_age.thickness.00.mg
# mri_surf2surf --hemi lh --s fsaverage --sval lh.gender_age.thickness.00.mgh --fwhm 10 --cortex --tval lh.gender_age.thickness.10B.mgh
mri_glmfit --y lh.gender_age.thickness.10.mgh --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd dods --C $SUBJECTS_DIR/glm/lh-Avg-thickness-age-Cor.mtx --surf fsaverage lh --cortex --glmdir lh.gender_age.glmdir
#!/bin/bash
# Slurm job script: GAMESS-US exam01 test run (old module tree).
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=GAMESS-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# GAMESS likes memory!
#SBATCH --mem=64G
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load GAMESS-US/20160708-GCC-4.9.2
# rungms is the GAMESS launcher wrapper shipped with the module.
rungms exam01.inp
# This is a directory for memory and program debugging and profiling.
# Launch an interactive job for these examples!
sinteractive --nodes=1 --ntasks-per-node=2 --time=1:00:00
# Environment
module purge
source /usr/local/module/spartan_old.sh
# Valgrind
# The test program valgrindtest.c is from Punit Guron. In this example the memory allocated to the pointer 'ptr' is never freed in the program.
# Load the module and compile with debugging symbols.
module load Valgrind/3.13.0-goolf-2015a
gcc -Wall -g valgrindtest.c -o valgrindtest
valgrind --leak-check=full ./valgrindtest 2> valgrind.out
# GDB
# Compile with debugging symbols. A good compiler will give a warning here, and run the program.
gcc -Wall -g gdbtest.c -o gdbtest
$ ./gdbtest
Enter the number: 3
The factorial of 3 is 0
# Load the GDB module e.g.,
module load GDB/7.8.2-goolf-2015a
# Launch GDB, set up a break point in the code, and execute
gdb gdbtest
..
(gdb) break 10
(gdb) run
(gdb) print j
# Basic commands in GDB
# run = run a program until end, signit, or breakpoint. Use Ctrl-C to stop
# break = set a breakpoint, either by linenumber, function etc. (shortcut b)
# list = list the code above and below where the program stopped (shortcut l)
# continue = restart execution of program where is stopped (shortcut c).
# print = print a variable (shortcut p)
# next, step = after using a signal or breakpoint use next and step to
# continue a progame line-by-line.
# NB: next will go 'over' the function call to the next line of code,
# step will go 'into' the function call (shortcut s)
#
# Variables can be temporarily modified with the `set` command
# e.g., set j=1
# The code will hit the breakpoint where you can interrogate the variables.
# Testing the variable 'j' will show it has not been initialised.
# Create a new file, initialise j to 1, and test again.
cp gdbtest.c gdbtest2.c
gcc -Wall -g gdbtest2.c -o gdbtest2
$ ./gdbtest2
# There is still another bug! Can you find it? Use GDB to help.
# Once you have fixed the second bug, use diff and patch to fix the original.
# The -u option provides unified content for both files.
diff -u gdbtest.c gdbtest2.c > gdbpatch.patch
# The patch command will overwrite the source with the modifications
# specified in the destination. Test the original again!
patch gdbtest.c gdbpatch.patch
# For Gprof, instrumentation code is inserted with the `-pg` option when
# compiled.
#
# GPROF output consists of two parts; the flat profile and the call graph.
# The flat profile gives the total execution time spent in each function.
# The textual call graph, shows for each function;
# (a) who called it (parent) and (b) who it called (child subroutines).
#
# Sample progam from Himanshu Arora, published on The Geek Stuff
# Compile, run the executable.
# Run the gprof tool. Various output options are available.
gcc -Wall -pg test_gprof.c test_gprof_new.c -o test_gprof
./test_gprof
gprof test_gprof gmon.out > analysis.txt
# For parallel applications each parallel process can be given its own
# output file, using the environment variable GMON_OUT_PREFIX
# Then run the parallel application as normal.
# Each gprof will create a binary for each profile ID.
# View the gmon.out's as one
export GMON_OUT_PREFIX=gmon.out
mpicc -Wall -pg mpi-debug.c -o mpi-debug
srun -n 2 ./mpi-debug
gprof -s mpi-debug gmon.out.*
# Last update 20200721 LL
# This is a directory for memory and program debugging and profiling.
# Launch an interactive job for these examples!
sinteractive --nodes=1 --ntasks-per-node=2 --time=1:00:00
# Environment
module purge
module load spartan_2019
module load foss/2019b
# Valgrind
# The test program valgrindtest.c is from Punit Guron. In this example the memory allocated to the pointer 'ptr' is never freed in the program.
# Load the module and compile with debugging symbols.
module load valgrind/3.14.0
gcc -Wall -g valgrindtest.c -o valgrindtest
valgrind --leak-check=full ./valgrindtest 2> valgrind.out
# GDB
# Compile with debugging symbols. A good compiler will give a warning here, and run the program.
gcc -Wall -g gdbtest.c -o gdbtest
$ ./gdbtest
Enter the number: 3
The factorial of 3 is 0
# Load the GDB module e.g.,
module load gdb/9.1-python-3.7.4
# Launch GDB, set up a break point in the code, and execute
gdb gdbtest
..
(gdb) break 10
(gdb) run
(gdb) print j
# Basic commands in GDB
# run = run a program until end, signit, or breakpoint. Use Ctrl-C to stop
# break = set a breakpoint, either by linenumber, function etc. (shortcut b)
# list = list the code above and below where the program stopped (shortcut l)
# continue = restart execution of program where is stopped (shortcut c).
# print = print a variable (shortcut p)
# next, step = after using a signal or breakpoint use next and step to
# continue a progame line-by-line.
# NB: next will go 'over' the function call to the next line of code,
# step will go 'into' the function call (shortcut s)
#
# Variables can be temporarily modified with the `set` command
# e.g., set j=1
# The code will hit the breakpoint where you can interrogate the variables.
# Testing the variable 'j' will show it has not been initialised.
# Create a new file, initialise j to 1, and test again.
cp gdbtest.c gdbtest2.c
gcc -Wall -g gdbtest2.c -o gdbtest2
$ ./gdbtest
# There is still another bug! Can you find it? Use GDB to help.
# Once you have fixed the second bug, use diff and patch to fix the original.
# The -u option provides unified content for both files.
diff -u gdbtest.c gdbtest2.c > gdbpatch.patch
# The patch command will overwrite the source with the modifications
# specified in the destination. Test the original again!
patch gdbtest.c gdbpatch.patch
# For Gprof, instrumentation code is inserted with the `-pg` option when
# compiled.
#
# GPROF output consists of two parts; the flat profile and the call graph.
# The flat profile gives the total execution time spent in each function.
# The textual call graph, shows for each function;
# (a) who called it (parent) and (b) who it called (child subroutines).
#
# Sample program from Himanshu Arora, published on The Geek Stuff
# Compile, run the executable.
# Run the gprof tool. Various output options are available.
gcc -Wall -pg test_gprof.c test_gprof_new.c -o test_gprof
./test_gprof
gprof test_gprof gmon.out > analysis.txt
# For parallel applications each parallel process can be given its own
# output file, using the environment variable GMON_OUT_PREFIX
# Then run the parallel application as normal.
# Each gprof run will create a binary profile per process (gmon.out.<pid>).
# View the gmon.out's as one (-s merges them into a summary, gmon.sum)
export GMON_OUT_PREFIX=gmon.out
mpicc -Wall -pg mpi-debug.c -o mpi-debug
srun -n 2 mpi-debug
gprof -s mpi-debug gmon.out.*
# Last update 20200721 LL
#!/bin/bash
#SBATCH --job-name="Gaussian Test"
# Change these as appropriate
INPUT_FILE="test0001.com"
OUTPUT_FILE="test0001.log"
# Load the 2019 module system, the PGI toolchain, and Gaussian 16
module purge
module load spartan_2019
module load pgi/18.10-gcc-8.3.0-2.32
module load gaussian/g16c01
# Quote the expansions so filenames containing spaces are handled safely
g16 < "$INPUT_FILE" > "$OUTPUT_FILE"
#!/bin/bash
# This script generates slurm scripts for the standard Gaussian tests.
# To submit the jobs use the following loop:
# for test in {0001..1044}; do sbatch job${test}.slurm; done
# Enjoy submitting 1044 Gaussian test jobs!
# Lev Lafayette, 2017
# NOTE: the generated scripts must *source* spartan_old.sh (executing it
# in a subshell would leave the job's module environment unchanged).
for test in {0001..1044}
do
cat <<- EOF > job${test}.slurm
#!/bin/bash
#SBATCH --job-name="Gaussian Test ${test}"
#SBATCH --ntasks=1
#SBATCH --time=12:00:00
module purge
source /usr/local/module/spartan_old.sh
module load Gaussian/g09
g09 < test${test}.com > test${test}.log
EOF
done
#!/bin/bash
#SBATCH --job-name="Gaussian Test"
# Change these as appropriate
INPUT_FILE="test0001.com"
OUTPUT_FILE="test0001.log"
# Load the 2019 module system, the PGI toolchain, and Gaussian 16
module purge
module load spartan_2019
module load pgi/18.10-gcc-8.3.0-2.32
module load gaussian/g16c01
# Quote the expansions so filenames containing spaces are handled safely
g16 < "$INPUT_FILE" > "$OUTPUT_FILE"
#!/bin/bash
# This script generates slurm scripts for the standard Gaussian tests.
# To submit the jobs use the following loop:
# for test in {0001..1044}; do sbatch job${test}.slurm; done
# Enjoy submitting 1044 Gaussian test jobs!
# Lev Lafayette, 2017
# Updated with new build system, 2020, LL
for test in {0001..1044}
do
# The here-document below is written verbatim into job${test}.slurm;
# ${test} is expanded now, at generation time, not at job run time.
cat <<- EOF > job${test}.slurm
#!/bin/bash
#SBATCH --job-name="Gaussian Test ${test}"
#SBATCH --ntasks=1
#SBATCH --time=12:00:00
module purge
module load spartan_2019
module load pgi/18.10-gcc-8.3.0-2.32
module load gaussian/g16c01
g16 < test${test}.com > test${test}.log
EOF
done
# We have GnuCOBOL on Spartan!
#
# GnuCOBOL is a free version of the COBOL compiler. Best of all, it's a transpiler, which translates into C.
#
# Which means parallel COBOL!
#
# Various example programs from Lev Lafayette's talk to Linux Users of Victoria,
# GnuCOBOL: A Gnu Life for an Old Workhorse, July 2016
# http://levlafayette.com/files/2016cobol.pdf
#
# Here's some various tests:
sinteractive --time=0:1:00
module purge
# spartan_old.sh must be sourced, not executed, so the module paths
# persist in this shell (executing it only affects a subshell).
source /usr/local/module/spartan_old.sh
module load gnucobol/3.0-rc1-GCC-6.2.0
cobc -Wall -x -free hello.cob -o hello-world
./hello-world
cobc -Wall -m -free hello.cob
cobc -Wall -C -free hello.cob
cobc -x shortest.cob
./shortest
cobc -x hello-trad.cob
./hello-trad
cobc -Wall -x -free luv.cob
./luv
cobc -Wall -free -x literals.cob
./literals
cobc -Wall -free -x posmov1.cob
./posmov1
cobc -Wall -free -x posmov2.cob
./posmov2
cobc -Wall -free -x redefines.cob
./redefines
cobc -Wall -free -x renames.cob
./renames
cobc -Wall -free -x posmov3.cob
./posmov3
cobc -Wall -free -x posmov4.cob
./posmov4
cobc -Wall -free -x class.cob
./class
# "evaluate" was executed without ever being compiled; build it first
cobc -Wall -free -x evaluate.cob
./evaluate
#!/bin/bash
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load Gurobi/7.0.1
# export GRB_LICENSE_FILE=/usr/local/easybuild/software/Gurobi/gurobi.lic
# Solve the MISC07 MIP benchmark and report wall time
time gurobi_cl misc07.mps
#!/bin/bash
# Gurobi example: one task with 8 CPU threads under the 2019 module system.
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
module purge
module load spartan_2019
module load gurobi/9.0.0
# Solve the MISC07 MIP benchmark and report wall time
time gurobi_cl misc07.mps
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=HMMER-test.slurm
# One task, multi-threaded by default
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load HMMER/3.2.1-foss-2017b
# Build a profile from a basic Stockholm alignment file
hmmbuild globins4.hmm globins4.sto
# Search a profile against a sequence database.
hmmsearch globins4.hmm globins45.fa > searchresults.txt
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=HMMER-test.slurm
# One task, multi-threaded by default
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load hmmer/3.2.1
# Build a profile from a basic Stockholm alignment file
hmmbuild globins4.hmm globins4.sto
# Search a profile against a sequence database.
hmmsearch globins4.hmm globins45.fa > searchresults.txt
#!/bin/bash
# Partition and name
#SBATCH --job-name=HTSlib-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load HTSlib/1.9-intel-2018.u4
# Start the tabix binary from htslib
# First index the bgzip-compressed, sorted GFF; then query a chr1 region
tabix -p gff sample.sorted.gff.gz;
tabix sample.sorted.gff.gz chr1:10,000,000-20,000,000;
#!/bin/bash
# Partition and name
#SBATCH --job-name=HTSlib-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load htslib/1.9
# Start the tabix binary from htslib
# First index the bgzip-compressed, sorted GFF; then query a chr1 region
tabix -p gff sample.sorted.gff.gz;
tabix sample.sorted.gff.gz chr1:10,000,000-20,000,000;
/* iterate.c */
/* ------------------------------------------------------------------- */
/* A deliberately computationally-intensive task for an interactive job */
/* sinteractive */
/* Compile with gcc iterate.c -o iterate, using your preferred compiler */
/* Run with time ./iterate for single core */
/* Original from https://stackoverflow.com/questions/21161175/example-of-very-cpu-intensive-c-function-under-5-lines-of-code */
/* -----------------------------------------*/
/* Multi-threaded version */
/* Now includes pragma for OMP multithreaded */
/* sinteractive --ntasks=1 --cpus-per-task=2 */
/* export OMP_NUM_THREADS=2 */
/* Compile with gcc -fopenmp iterate.c -o iterate with your preferred compiler */
/* time ./iterate */
/* Compare the difference! */
/* ----------------------- */
#include <stdio.h>
int
main (void)
{
/* volatile prevents the compiler from optimising away the empty loop,
   so the 1e11 iterations genuinely burn CPU time; the pragma splits the
   iteration range across OpenMP threads. */
printf("start\n");
volatile unsigned long long i;
#pragma omp parallel for
for (i = 0; i < 100000000000ULL; ++i);
printf("stop\n");
return 0;
}
#!/bin/bash
# MPI hello world with 8 tasks; Slurm chooses the node layout.
#SBATCH --ntasks=8
module purge
module load OpenMPI/1.10.0-GCC-4.9.2
# srun launches one MPI rank per allocated task
time srun mpi-helloworld
#!/bin/bash
# MPI hello world pinned to an explicit layout: 2 nodes x 4 tasks = 8 ranks.
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4
module purge
module load OpenMPI/1.10.0-GCC-4.9.2
# srun launches one MPI rank per allocated task
time srun mpi-helloworld
#!/bin/bash
# NOTE(review): no SBATCH resource directives — presumably task counts are
# supplied on the sbatch command line or defaults are used; confirm.
module purge
module load OpenMPI/1.10.0-GCC-4.9.2
time srun mpi-helloworld
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=JAGS-test.slurm
# Run on four CPUs
#SBATCH --ntasks=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 1:00:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load JAGS/4.3.0-intel-2017.u2
# Extract the classic BUGS examples
tar xzvf classic-bugs.tar.gz
# NOTE(review): presumably waits for the extracted files to be visible on
# the shared filesystem — confirm this delay is actually required.
sleep 240
cd classic-bugs/vol1
# Run the example checks with 4 parallel make jobs
make -j4 check
cd ../vol2
make -j4 check
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=JAGS-test.slurm
# Run on four CPUs
#SBATCH --ntasks=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 1:00:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load jags/4.3.0
# Extract the classic BUGS examples
tar xzvf classic-bugs.tar.gz
# NOTE(review): presumably waits for the extracted files to be visible on
# the shared filesystem — confirm this delay is actually required.
sleep 240
cd classic-bugs/vol1
# Run the example checks with 4 parallel make jobs
make -j4 check
cd ../vol2
make -j4 check
#!/bin/bash
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load Julia/0.6.0-binary
julia simple.jl
#!/bin/bash
# Julia example under the 2019 module system.
module purge
module load spartan_2019
module load julia/1.3.1-linux-x86_64
julia simple.jl
#!/bin/bash
#SBATCH -p physical
#SBATCH --ntasks=8
# NOTE(review): unlike the sibling examples there is no "module purge"
# here — confirm that inheriting the submit shell's modules is intended.
module load MATLAB
# Compare serial (tictoc.m) vs parallel (tictoc-p.m) timing
time matlab -nodesktop -nodisplay -nosplash < tictoc.m
time matlab -nodesktop -nodisplay -nosplash < tictoc-p.m
#!/bin/bash
# Single-task MATLAB job: render the polar plot script headlessly.
#SBATCH --ntasks=1
module purge
source /usr/local/module/spartan_old.sh
module load MATLAB/2016a
# The script is fed to MATLAB on standard input
matlab -nodesktop -nodisplay -nosplash < polar-plot.m
#!/bin/bash
# MATLAB timing comparison under the 2019 module system: 8 tasks, 1 hour.
#SBATCH --ntasks=8
#SBATCH --time=1:0:0
module purge
module load spartan_2019
module load matlab/2020a
# Compare serial (tictoc.m) vs parallel (tictoc-p.m) timing
time matlab -nodesktop -nodisplay -nosplash < tictoc.m
time matlab -nodesktop -nodisplay -nosplash < tictoc-p.m
#!/bin/bash
# One task, one core, default partition, ten minutes walltime
# Headless MATLAB run of the polar plot example (2019 module system).
module purge
module load spartan_2019
module load matlab/2020a
# The script is fed to MATLAB on standard input
matlab -nodesktop -nodisplay -nosplash < polar-plot.m
# Managing MATLAB Parpool on Spartan
## What is a parpool
Short for 'Parallel Pool', it allows MATLAB to parallelise certain operations, reducing computation times. More info can be found at [https://www.mathworks.com/help/parallel-computing/parpool.html](https://www.mathworks.com/help/parallel-computing/parpool.html)
## Create a parpool in your MATLAB code
Firstly, let's create a MATLAB .m file
```
$ vim test-parpool.m
```
Then add
```
parpool('local', str2num(getenv('SLURM_CPUS_PER_TASK')))
```
For information about what parpools can do, see [https://hpc.nih.gov/apps/Matlabdct.html](https://hpc.nih.gov/apps/Matlabdct.html)
## Create a batch file
```
vim test-parpool.slurm
```
Add to this file
```
#!/bin/bash
#SBATCH --cpus-per-task=8
#SBATCH --time=01:00:00
module load MATLAB/2019a
matlab -nodesktop -nosplash < test-parpool.m
```
You can use `sbatch` to submit it to the queue
```
sbatch test-parpool.slurm
```
#!/bin/bash
# Name and partition
#SBATCH --job-name=Mathematica-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load Mathematica/12.0.0
# Read and evaluate the .m file
# Example derived from: https://pages.uoregon.edu/noeckel/Mathematica.html
# "math" is the command-line Mathematica kernel; -noprompt suppresses
# the In[]/Out[] banners so only results reach output.txt
math -noprompt -run "<<test.m" > output.txt
#!/bin/bash
# Name and partition
#SBATCH --job-name=Mathematica-test.slurm
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load mathematica/12.0.0
# Read and evaluate the .m file
# Examples from https://rcc.uchicago.edu/docs/software/environments/mathematica/index.html
# "math" is the command-line Mathematica kernel; scripts arrive on stdin
math -run < math-simple.m
math -run < sample-parallel.m
Example test.m derived from: https://pages.uoregon.edu/noeckel/Mathematica.html
It will work as interactive job with Mathematica, step-wise.
A = Sum[i, {i,1,100}]
B = Mean[{25, 36, 22, 16, 8, 42}]
Answer = A + B
Quit[];
(*Limits Mathematica to requested resources*)
Unprotect[$ProcessorCount];$ProcessorCount = 8;
(*Prints the machine name that each kernel is running on*)
Print[ParallelEvaluate[$MachineName]];
(*Prints the exponents p (up to 2000) for which 2^p-1 is a Mersenne prime*)
Print[Parallelize[Select[Range[2000],PrimeQ[2^#-1]&]]];
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=MrBayes-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
source /usr/local/module/spartan_old.sh
module load MrBayes/3.2.6-intel-2016.u3
# NOTE(review): MrBayes normally reads NEXUS input; confirm this build
# accepts the XML example file.
mb Dengue4.env.xml
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=MrBayes-test.slurm
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youreamiladdress@unimelb.edu
# SBATCH --mail-type=ALL
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load mrbayes/3.2.7
# NOTE(review): MrBayes normally reads NEXUS input; confirm this build
# accepts the XML example file.
mb Dengue4.env.xml
#!/bin/bash
#SBATCH --time=0-00:15:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load ORCA/4_1_0-linux_x86-64-OpenMPI-3.1.3
# ORCA should be invoked via its full path for parallel runs;
# $EBROOTORCA is set by the ORCA module
$EBROOTORCA/orca orca.in 1> orca.out
#!/bin/bash
#SBATCH --time=0-00:15:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
module purge
module load spartan_2019
module load foss/2019b
module load orca/4.2.1
# ORCA should be invoked via its full path for parallel runs;
# $EBROOTORCA is set by the orca module
$EBROOTORCA/orca orca.in 1> orca.out
#!/bin/bash
#SBATCH --job-name="octave-array"
#SBATCH --ntasks=1
#SBATCH --time=0-00:15:00
# This is the array
# Spawns 10 independent array tasks; each receives SLURM_ARRAY_TASK_ID (1-10)
#SBATCH --array=1-10
module purge
source /usr/local/module/spartan_old.sh
module load Octave/3.8.2-goolf-2015a
# Each array task processes its own input file, file-<N>.oct
octave file-${SLURM_ARRAY_TASK_ID}.oct
#!/bin/bash
#SBATCH --job-name="octave-array"
#SBATCH --ntasks=1
#SBATCH --time=0-00:15:00
# This is the array
# Spawns 10 independent array tasks; each receives SLURM_ARRAY_TASK_ID (1-10)
#SBATCH --array=1-10
module purge
module load spartan_2019
module load octave/4.2.1
# Each array task processes its own input file, file-<N>.oct
octave file-${SLURM_ARRAY_TASK_ID}.oct
# Part I: Preprocess 1 core OpenFOAM Task
This is the most simple process, based on the Lid-driven cavity flow from the OpenFOAM examples
(`https://www.openfoam.com/documentation/tutorial-guide/tutorialse2.php#x6-70002.1.1`)
The first example simply copies the cavity example and generates a mesh with the blockMesh command, using the parameters in `blockMeshDict` in
`cavity/cavity/system`.
# Viewing the Mesh
This requires an interactive job.
1. Login with X-windows forwarding, launch the job
ssh username -X
sbatch openfoam-single-1.slurm
When the job completes check the output e.g.,
less slurm-7655177.out
2. Launch an interactive job with X-windows forwarding
sinteractive --x11=first --time=1:00:00
3. Load the module and source the application parameters.
module purge
source /usr/local/module/spartan_old.sh
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
cd cavity/cavity
4. Launch Paraview
paraFoam
Select `Apply` under properties which will launch the Mesh
After that you can change the Display to use the Representation `Wireframe`.
# Part II Process 1 Core OpenFOAM Task
1. Login with X-windows forwarding, launch the job
ssh username -X
sbatch openfoam-single-2.slurm
When the job completes check the output e.g.,
less slurm-7655290.out
2. Launch an interactive job with X-windows forwarding
sinteractive --x11=first --time=1:00:00
3. Load the module and source the application parameters.
module purge
source /usr/local/module/spartan_old.sh
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
cd cavity/cavity
4. Launch Paraview
paraFoam
Select `Apply` under properties which will launch the Mesh
Select the Properties panel for the cavity.OpenFOAM case module.
After selecting `Apply`, choose `p` under `Coloring`, and `Rescale to Data Range` (button next to `Edit`).
Run VCR Controls (play button, top menu)
Change Preset for Blue-Red colour rainbow if desired.
# OpenFOAM and RapidCFD
Please note that this tutorial is NOT YET COMPLETE
OpenFOAM can use RapidCFD for GPU acceleration. With RapidCFD, you have to specify the devices you want to use.
For example you set `--gres=gpu:1, and --nodes=4`, need to do
`srun -N 4 pulsatilePipe -parallel -devices "(0 0 0 0)" > log0`
The 0's after the devices option are the indices for the GPUs. Each node has 4 GPUs, and the indices range from 0-3 for the 4 GPUs. 0 is the
1st GPU on the node, 1 the second etc etc. So you are asking for the 1st GPU on each node.
#!/bin/bash
#SBATCH --job-name="OpenFOAM test"
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=6
#SBATCH --time=0-00:10:00
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
# OpenFOAM commands go here. For example;
cd LES/oppositeBurningPanels
blockMesh
# Decompose the domain for the parallel ranks
foamJob decomposePar
sleep 60
foamJob -parallel icoFoam
#!/bin/bash
# SBATCH --account=punim0396
# SBATCH --partition=punim0396
#SBATCH --job-name="OpenFOAM test"
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=12
#SBATCH --time=0-00:10:00
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenMPI/3.0.0-intel-2017.u2
# $FOAM_BASH is set by the OpenFOAM module; without loading it the
# "source" below fails. Use the same build as the sibling examples.
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
# OpenFOAM commands go here. For example;
srun -n 1 decomposePar -latestTime -force
srun -n 12 pimpleFoam -parallel
# Fixed typo: the utility is reconstructPar, not "recontructPar"
srun -n 1 reconstructPar -latestTime
#!/bin/bash
#SBATCH --job-name="OpenFOAM test"
#SBATCH --partition=physical
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
# NOTE(review): no "module purge"/spartan_old.sh here, unlike the sibling
# examples — confirm the old module system is active by default.
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
# OpenFOAM commands go here. For example;
cd cavity/cavity
blockMesh
#!/bin/bash
#SBATCH --job-name="OpenFOAM test"
#SBATCH --partition=physical
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
# NOTE(review): no "module purge"/spartan_old.sh here, unlike the sibling
# examples — confirm the old module system is active by default.
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
# OpenFOAM commands go here. For example;
cd cavity/cavity
blockMesh
# Run the incompressible laminar solver on the generated mesh
icoFoam
# Part I: Preprocess 1 core OpenFOAM Task
This is the most simple process, based on the Lid-driven cavity flow from the OpenFOAM examples
(`https://www.openfoam.com/documentation/tutorial-guide/tutorialse2.php#x6-70002.1.1`)
The first example simply copies the cavity example and generates a mesh with the blockMesh command, using the parameters in `blockMeshDict` in
`cavity/cavity/system`.
# Viewing the Mesh
This requires an interactive job.
1. Login with X-windows forwarding, launch the job
ssh username -X
sbatch openfoam-single-1.slurm
When the job completes check the output e.g.,
less slurm-7655177.out
2. Launch an interactive job with X-windows forwarding
sinteractive --x11=first --time=1:00:00
3. Load the module and source the application parameters.
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
cd cavity/cavity
4. Launch Paraview
paraFoam
Select `Apply` under properties which will launch the Mesh
After that you can change the Display to use the Representation `Wireframe`.
# Part II Process 1 Core OpenFOAM Task
1. Login with X-windows forwarding, launch the job
ssh username -X
sbatch openfoam-single-2.slurm
When the job completes check the output e.g.,
less slurm-7655290.out
2. Launch an interactive job with X-windows forwarding
sinteractive --x11=first --time=1:00:00
3. Load the module and source the application parameters.
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
cd cavity/cavity
4. Launch Paraview
paraFoam
Select `Apply` under properties which will launch the Mesh
Select the Properties panel for the cavity.OpenFOAM case module.
After selecting `Apply`, choose `p` under `Coloring`, and `Rescale to Data Range` (button next to `Edit`).
Run VCR Controls (play button, top menu)
Change Preset for Blue-Red colour rainbow if desired.
# OpenFOAM and RapidCFD
Please note that this tutorial is NOT YET COMPLETE
OpenFOAM can use RapidCFD for GPU acceleration. With RapidCFD, you have to specify the devices you want to use.
For example you set `--gres=gpu:1, and --nodes=4`, need to do
`srun -N 4 pulsatilePipe -parallel -devices "(0 0 0 0)" > log0`
The 0's after the devices option are the indices for the GPUs. Each node has 4 GPUs, and the indices range from 0-3 for the 4 GPUs. 0 is the
1st GPU on the node, 1 the second etc etc. So you are asking for the 1st GPU on each node.
#!/bin/bash
#SBATCH --job-name="OpenFOAM test"
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=6
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
# OpenFOAM commands go here. For example;
cd LES/oppositeBurningPanels
blockMesh
# Decompose the domain for the parallel ranks
foamJob decomposePar
# NOTE(review): presumably waits for decomposition output on the shared
# filesystem — confirm this delay is required.
sleep 60
foamJob -parallel icoFoam
#!/bin/bash
# SBATCH --account=punim0396
# SBATCH --partition=punim0396
#SBATCH --job-name="OpenFOAM test"
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=12
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
# OpenFOAM commands go here. For example;
srun -n 1 decomposePar -latestTime -force
srun -n 12 pimpleFoam -parallel
# Fixed typo: the utility is reconstructPar, not "recontructPar"
srun -n 1 reconstructPar -latestTime
#!/bin/bash
#SBATCH --job-name="OpenFOAM test"
#SBATCH --partition=physical
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
# NOTE(review): no "module purge"/spartan_old.sh here, unlike the sibling
# examples — confirm the old module system is active by default.
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
# OpenFOAM commands go here. For example;
cd cavity/cavity
blockMesh
#!/bin/bash
#SBATCH --job-name="OpenFOAM test"
#SBATCH --partition=physical
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
# NOTE(review): no "module purge"/spartan_old.sh here, unlike the sibling
# examples — confirm the old module system is active by default.
module load OpenFOAM/5.0-intel-2017.u2
source $FOAM_BASH
# OpenFOAM commands go here. For example;
cd cavity/cavity
blockMesh
# Run the incompressible laminar solver on the generated mesh
icoFoam
# Don't do this on the head node.
# Many of these examples are from Lev Lafayette, Sequential and Parallel Programming with C and Fortran, VPAC, 2015-2016, ISBN 978-0-9943373-1-3, https://github.com/VPAC/seqpar
$ sinteractive --time=6:00:00 --ntasks=1 --cpus-per-task=8
# 2015 modules system ..
$ module purge
$ source /usr/local/module/spartan_old.sh
$ module load GCC/4.9.2
# .. or 2019 modules system
$ module purge
$ module load spartan_2019
$ module load gcc/8.3.0
# Export with the number of threads desired. Note that it is most efficient to have a number of cpus equal to the number of threads.
$ export OMP_NUM_THREADS=8
# Compile with OpenMP directives. These examples use free-form for Fortran e.g.,
$ gcc -fopenmp helloomp.c -o helloompc
$ gfortran -fopenmp helloomp.f90 -o helloompf
# Execute the programs
$ ./helloompc
$ ./helloompf
# Note that creating executables with different compilers requires a different compiler command OpenMP flag. For example:
$ module load intel/2017.u2
# Output names fixed to helloompc/helloompf so the run commands below
# execute the freshly built Intel binaries rather than the stale GCC ones.
$ icc -qopenmp helloomp.c -o helloompc
$ ifort -qopenmp helloomp.f90 -o helloompf
$ ./helloompc
$ ./helloompf
$ module load PGI/18.5
$ pgcc -mp helloomp.c -o helloompc
$ pgf90 -mp helloomp.f90 -o helloompf
$ ./helloompc
$ ./helloompf
# Parallel regions can call functions within them with parallel regions. By default, these have 1 thread unless an environment variable is set.
# This example from Oracle's Sun Studio 12: OpenMP API User's Guide
$ gcc -fopenmp nested.c -o nestedc
$ export OMP_NESTED=true
$ ./nestedc
$ export OMP_NESTED=false
$ ./nestedc
# The same variable name can have different values with the parallel section and outside it.
$ gcc -fopenmp sharedhello.c -o sharedhelloompc
$ gfortran -fopenmp sharedhello.f90 -o sharedhelloompf
$ ./sharedhelloompc
$ ./sharedhelloompf
# One of the most typical applications is the parallelisation of loops. This includes a worksharing construct, which distributes the execution of the parallel region among the thread team members. There is an implicit barrier at the end of a loop construct, unless a `nowait` clause has been stated. Loop iteration variables are private by default.
# Note that this example makes use of "parallel for" and "parallel do". In most cases they are mostly equivalent; parallel spawns a group of threads, while the for/do divides loop iterations between the spawned threads.
$ gcc -fopenmp hello1millomp.c -o hello1millc
$ gfortran -fopenmp hello1millomp.f90 -o hello1millf
$ ./hello1millc
$ ./hello1millf
# Sometimes separating them is a good idea for "thread aware" constructions. e.g.,
#pragma omp parallel
{
#pragma omp for
for(1...10) // first parallel block
{
}
#pragma omp single
{} // single thread processing
#pragma omp for // second parallel block
for(1...10)
{
}
#pragma omp single
{} // make some single thread processing again
}
# There is also the simd directive; this allows loop iterations to be executed on SIMD lanes that are available to the thread.
# OpenMP only used to exploit multiple threads for multiple cores; the newer simd extension allows use of SIMD instructions on modern CPUs, such as Intel's AVX/SSE and ARM's NEON etc.
# On Spartan, the AVX-512 instructions are on all of the physical nodes, phi nodes, and bm[053-066].
# Use sbatch --constraint=avx512 to run specifically on the bm nodes with this.
$ gfortran -fopenmp hello1millsimd.f90 -o hello1millsimdf
$ gcc -fopenmp hello1millsimd.c -o hello1millsimdc
$ ./hello1millsimdf
$ ./hello1millsimdc
# The sections construct distributes threads among structured blocks. Note the threadids
gfortran -fopenmp hello3versomp.f90 -o hello3versompf
gcc -fopenmp hello3versomp.c -o hello3versompc
$ ./hello3versompf
$ ./hello3versompc
# The `task` constructs are very useful to most efficiently implement parallelism. The general principle is that a thread generates tasks which are then executed according to the runtime system, either immediately or delayed.
$ gfortran -fopenmp colourless-3.f90 -o colourless-3f
$ gcc -fopenmp colourless-3.c -o colourless-3c
$ ./colourless-3f
$ ./colourless-3c
# Internal control variables and their interactions with runtime library routines are illustrated by the examples icv1.f90 and icv1.c.
# Four ICV's - nest-var, max-active-levels-var, dyn-var, and nthreads-var - are modified by calls their respective library routines (omp_set_nested(), omp_set_max_active_levels(), omp_set_dynamic(), and omp_set_num_threads()).
$ gcc -fopenmp icv1.c -o icv1c
$ gfortran -fopenmp icv1.f90 -o icv1f
$ ./icv1c
$ ./icv1f
# When submitting OpenMP jobs to the cluster don't forget to include the environment variables in the job script!
# See: hello3vers.slurm
# A number of these examples come from Lev Lafayette, Sequential and Parallel Programming with C and Fortran, VPAC, 2015-2016, ISBN 978-0-9943373-1-3
# You will need to add a partition in each of these Slurm scripts; "physical" is recommended. e.g.,
# #SBATCH --partition=physical
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenMPI/1.10.2-GCC-4.9.2
mpicc mpi-helloworld.c -o mpi-helloworld
sbatch mpi-helloworld.slurm
mpif90 mpi-helloworld.f90 -o mpi-helloworld
sbatch mpi-helloworld.slurm
mpicc mpi-ping.c -o mpi-ping
sbatch mpi-ping.slurm
mpicc mpi-sendrecv.c -o mpi-sendrecv
sbatch mpi-sendrecv.slurm
mpif90 mpi-sendrecv.f90 -o mpi-sendrecv
sbatch mpi-sendrecv.slurm
mpicc mpi-pingpong.c -o mpi-pingpong
sbatch mpi-pingpong.slurm
# Source file name fixed from "mpi-gamethory.c" to match the binary and
# job-script names used everywhere else — verify the file name on disk.
mpicc mpi-gametheory.c -o mpi-gametheory
sbatch mpi-gametheory.slurm
# You'll need compile with the math library for this one!
mpicc mpi-particle.c -lm -o mpi-particle
# Submit the particle job (the original mistakenly resubmitted
# mpi-gametheory.slurm here)
sbatch mpi-particle.slurm
mpicc mpi-group.c -o mpi-group
sbatch mpi-group.slurm
#!/bin/bash
#SBATCH --job-name="GameTheory"
#SBATCH --ntasks=2
#SBATCH --time=0-00:10:00
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenMPI/1.10.2-GCC-4.9.2
srun ./mpi-gametheory
#!/bin/bash
#SBATCH --job-name="MPI Group"
#SBATCH --ntasks=8
#SBATCH --time=0-00:10:00
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenMPI/1.10.2-GCC-4.9.2
srun ./mpi-group
#!/bin/bash
#SBATCH --ntasks=16
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenMPI/1.10.2-GCC-4.9.2
srun mpi-helloworld
#!/bin/bash
#SBATCH --job-name="Particle Advector"
#SBATCH --ntasks=8
#SBATCH --time=0-00:10:00
module purge
# Must be sourced, not executed, so module paths persist in this shell
source /usr/local/module/spartan_old.sh
module load OpenMPI/1.10.2-GCC-4.9.2
srun ./mpi-particle
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment