Commit 5404be05 authored by root

Update for Aug 7

parent fe49bb25
......@@ -15,7 +15,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load abricate/0.9.9-perl-5.30.0
......
......@@ -15,7 +15,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load abyss/2.1.5
......
......@@ -16,7 +16,7 @@
# Load the environment variables
module purge
module load spartan_2019
module load admixture/1.3.0
# Untar sample files, run application
......
......@@ -15,7 +15,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load afni/18.3.00-python-3.7.4
......
......@@ -17,7 +17,6 @@
# You must have an ultrametric phylogenetic tree.
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load bamm/2.5.0
......
......@@ -15,7 +15,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load bbmap/38.76
# See examples at:
......
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load bedtools/2.27.1
......
......@@ -9,7 +9,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load blast/2.10.0-linux_x86_64
# Run the job
......
......@@ -3,7 +3,6 @@
# Set up environment PATH
module purge
module load spartan_2019
module load foss/2019b
module load bwa/0.7.17
module load samtools/1.9
......
......@@ -15,7 +15,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load beast/2.5.1
......
......@@ -16,7 +16,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load cpmd/4.3
......
......@@ -32,6 +32,8 @@ To run a sample CUDA job, start with an interactive job.
sinteractive --partition=gpgputest -A hpcadmingpgpu --gres=gpu:p100:4
Change "hpcadmingpgpu" to another gpgpu project.
Load a CUDA module
`module load CUDA/8.0.44-GCC-4.9.2`
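Before compiling anything it can be worth confirming that the allocated GPUs and the toolkit are visible from the interactive session; a minimal sketch, assuming the CUDA module above has been loaded:
```
# Sanity check inside the interactive GPU session
nvidia-smi       # list the GPUs allocated to this job
nvcc --version   # confirm the compiler provided by the loaded CUDA module
```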
......
......@@ -14,7 +14,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load cufflinks/20190706
......
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load fftw/3.3.8
......
......@@ -2,7 +2,6 @@
#SBATCH -t 0:30:00
module purge
module load spartan_2019
module load foss/2019b
module load fsl/6.0.1-python-3.7.4
......
......@@ -4,7 +4,6 @@
#SBATCH -t 0:30:00
module purge
module load spartan_2019
module load foss/2019b
module load fsl/6.0.1-python-3.7.4
......
......@@ -7,7 +7,6 @@ sinteractive --nodes=1 --ntasks-per-node=2 --time=1:00:00
# Environment
module purge
module load spartan_2019
module load foss/2019b
# Valgrind
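A minimal memcheck sketch, assuming valgrind is available on the PATH after the modules above and using a placeholder source file myprog.c:
```
# Build with debug symbols so valgrind can report file/line information,
# then run the memcheck tool on the resulting binary
gcc -g -O0 -o myprog myprog.c
valgrind --leak-check=full ./myprog
```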
......
......@@ -6,7 +6,6 @@ INPUT_FILE="test0001.com"
OUTPUT_FILE="test0001.log"
module purge
module load spartan_2019
module load pgi/18.10-gcc-8.3.0-2.32
module load gaussian/g16c01
......
......@@ -13,7 +13,6 @@ cat <<- EOF > job${test}.slurm
#SBATCH --ntasks=1
#SBATCH --time=12:00:00
module purge
module load spartan_2019
module load pgi/18.10-gcc-8.3.0-2.32
module load gaussian/g16c01
g16 < test${test}.com > test${test}.log
......
......@@ -2,6 +2,5 @@
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
module purge
module load spartan_2019
module load gurobi/9.0.0
time gurobi_cl misc07.mps
......@@ -16,7 +16,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load hmmer/3.2.1
......
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load htslib/1.9
......
......@@ -22,6 +22,8 @@ sinteractive --x11=first --partition=deeplearn --qos=gpgpudeeplearn --gres=gpu:v
sinteractive --partition=gpgpu --account=hpcadmingpgpu --gres=gpu:2
# (Change hpcadmingpgpu to another gpgpu-enabled account)
# If the user is not using a Linux local machine, they will need to install an X-windows client, such as Xming for MS-Windows or X11 on Mac OS X from the XQuartz project.
# If you need to download files whilst on an interactive job, you must use the University proxy.
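For example, loading the web_proxy module (also used in the Mathematica notes below) is assumed to export the usual http_proxy/https_proxy variables so downloads work from the compute node; a minimal sketch with a placeholder URL:
```
# Inside the interactive job: enable the University proxy, then download
module load web_proxy
wget https://example.org/data.tar.gz
```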
......
......@@ -15,7 +15,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load jags/4.3.0
......
#!/bin/bash
module purge
module load spartan_2019
module load julia/1.3.1-linux-x86_64
julia simple.jl
......@@ -3,7 +3,6 @@
#SBATCH --time=1:0:0
module purge
module load spartan_2019
module load matlab/2020a
time matlab -nodesktop -nodisplay -nosplash < tictoc.m
......
#!/bin/bash
# One task, one core, default partition, ten minutes walltime
module purge
module load spartan_2019
module load matlab/2020a
matlab -nodesktop -nodisplay -nosplash< polar-plot.m
......@@ -14,7 +14,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load mathematica/12.0.0
# Read and evaluate the .m file
......
Example test.m derived from: https://pages.uoregon.edu/noeckel/Mathematica.html
It will work as an interactive job with Mathematica, step-wise.
# Mathematica On Spartan
The example test.m is derived from: `https://pages.uoregon.edu/noeckel/Mathematica.html`
It will work as an interactive job with Mathematica, step-wise.
The examples math-simple.m and sample-parallel.m are from `https://rcc.uchicago.edu/docs/software/environments/mathematica/index.html`
X-Windows forwarding will work with Mathematica. It may be necessary to also load the web_proxy module prior to invoking Mathematica, e.g.,
```
$ ssh username@spartan.hpc.unimelb.edu.au -X
..
$ sinteractive --x11=first
..
$ module load web_proxy
$ module load x11/20190717
$ module load mathematica/12.0.0
$ mathematica
```
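For non-interactive runs of the example .m files, a batch job along these lines should work; this is a sketch only, with placeholder resource requests, and it assumes the Mathematica command-line kernel (`math`) reads the script from standard input:
```
#!/bin/bash
#SBATCH --ntasks=1
#SBATCH --time=0:30:00
module purge
module load spartan_2019
module load mathematica/12.0.0
# Read and evaluate the example file in batch mode
math < math-simple.m > math-simple.out
```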
# Document Version Control
v1.0 Lev Lafayette, 20200724
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load mrbayes/3.2.7
......
......@@ -15,7 +15,7 @@
# Environment variables to make it work:
module purge
module load spartan_2019
module load foss/2019b
module load namd/2.13-mpi
......
......@@ -4,7 +4,6 @@
#SBATCH --ntasks-per-node=4
module purge
module load spartan_2019
module load foss/2019b
module load orca/4.2.1
......
......@@ -7,7 +7,6 @@
#SBATCH --array=1-10
module purge
module load spartan_2019
module load octave/4.2.1
octave file-${SLURM_ARRAY_TASK_ID}.oct
......
......@@ -14,7 +14,7 @@ main()
Example exercises and solutions from the Pawsey Supercomputing Centre.
1. Start an interactive job
1. Start an interactive job. Use a project ID that has gpgpu access.
`sinteractive --partition=gpgputest -A hpcadmingpgpu --gres=gpu:p100:4`
2. Start with serial code
......
......@@ -26,7 +26,6 @@ sinteractive --x11=first --time=1:00:00
3. Load the module and source the application parameters.
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
......@@ -57,7 +56,6 @@ sinteractive --x11=first --time=1:00:00
3. Load the module and source the application parameters.
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
......
......@@ -5,7 +5,6 @@
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
......
......@@ -7,7 +7,6 @@
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load foss/2019b
module load openfoam/7
......
......@@ -4,7 +4,9 @@
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
module load OpenFOAM/5.0-intel-2017.u2
module purge
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
......
......@@ -4,7 +4,9 @@
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
module load OpenFOAM/5.0-intel-2017.u2
module purge
module load foss/2019b
module load openfoam/7
source $FOAM_BASH
......
// OpenMP example program: Dijkstra shortest-path finder in a
// bidirectional graph
// serves as a tutorial to OpenMP; see notes in comments at the end of
// the file
// each thread handles one chunk of vertices
// usage: dijkstra
// From Professor Norm Matloff University of California, Davis
#include <stdio.h>
#include <omp.h>  // needed for omp_get_thread_num() and omp_get_num_threads()
#define LARGEINT 2<<30-1 // "infinity"
#define NV 6
// global variables, all shared by all threads by default
int ohd[NV][NV], // 1-hop distances between vertices
mind[NV], // min distances found so far
notdone[NV], // vertices not checked yet
nth, // number of threads
chunk, // number of vertices handled by each thread
md, // current min over all threads
mv; // vertex which achieves that min
void init(int ac, char **av)
{ int i,j;
for (i = 0; i < NV; i++)
for (j = 0; j < NV; j++) {
if (j == i) ohd[i][i] = 0;
else ohd[i][j] = LARGEINT;
}
ohd[0][1] = ohd[1][0] = 40;
ohd[0][2] = ohd[2][0] = 15;
ohd[1][2] = ohd[2][1] = 20;
ohd[1][3] = ohd[3][1] = 10;
ohd[1][4] = ohd[4][1] = 25;
ohd[2][3] = ohd[3][2] = 100;
ohd[1][5] = ohd[5][1] = 6;
ohd[4][5] = ohd[5][4] = 8;
for (i = 1; i < NV; i++) {
notdone[i] = 1;
mind[i] = ohd[0][i];
}
}
// finds closest to 0 among notdone, among s through e
void findmymin(int s, int e, int *d, int *v)
{ int i;
*d = LARGEINT;
for (i = s; i <= e; i++)
if (notdone[i] && mind[i] < *d) {
         *d = mind[i];  // use the current tentative distance (ohd[0][i] would ignore shorter indirect paths found so far)
*v = i;
}
}
// for each i in [s,e], ask whether a shorter path to i exists, through
// mv
void updateohd(int s, int e)
{ int i;
for (i = s; i <= e; i++)
if (mind[mv] + ohd[mv][i] < mind[i])
mind[i] = mind[mv] + ohd[mv][i];
}
void dowork()
{
#pragma omp parallel // Note 1
{ int startv,endv, // start, end vertices for this thread
step, // whole procedure goes NV steps
mymd, // min value found by this thread
mymv, // vertex which attains that value
me = omp_get_thread_num(); // my thread number
#pragma omp single // Note 2
{ nth = omp_get_num_threads(); chunk = NV/nth;
printf("there are %d threads\n",nth); }
// Note 3
startv = me * chunk;
endv = startv + chunk - 1;
for (step = 0; step < NV; step++) {
// find closest vertex to 0 among notdone; each thread finds
// closest in its group, then we find overall closest
#pragma omp single
{ md = LARGEINT; mv = 0; }
findmymin(startv,endv,&mymd,&mymv);
// update overall min if mine is smaller
#pragma omp critical // Note 4
{ if (mymd < md)
{ md = mymd; mv = mymv; }
}
// mark new vertex as done
#pragma omp single
{ notdone[mv] = 0; }
// now update my section of ohd
updateohd(startv,endv);
#pragma omp barrier
}
}
}
int main(int argc, char **argv)
{ int i;
init(argc,argv);
dowork();
// back to single thread now
printf("minimum distances:\n");
for (i = 1; i < NV; i++)
printf("%d\n",mind[i]);
}
// tutorial notes:
// 1. OpenMP works via a preprocessor, which translates pragmas to
// threads calls. Note that the sharp sign ('#') must be the first
// character in the line, other than blanks.
//
// The "parallel" clause says, "Have each thread do this block"
// (enclosed by braces). Code not in a block with a "parallel"
// pragma is done only by the master thread.
// 2. The "single" clause says, "Have only one thread (whichever hits
// this line first) execute the following block."
// In this case, we are calling the OMP function
// omp_get_num_threads(), which of course returns the number of
// threads. Since we assign the return value to the global variable
// nth, only one thread needs to do this, so we use "single". And
// though there would be no harm (other than a delay) if all
// threads did this, in some applications we would need to limit an
// action to just one thread.
// 3. The "barrier" clause does the standard barrier operation. Note
// carefully that there are also implicit barriers following blocks
// to which various OpenMP pragmas apply, such as "for" and
// "single". One can override those implicit barriers by using the
// "nowait" clause. On platforms with nonsequential memory
// consistency, you can also use the "flush" directive to force a
// memory update.
// 4. The "critical" clause sets up a critical section, with invisible
// lock/unlock operations. Note carefully that the clause may be
// followed by an optional name, which is crucial in some
// applications. All critical sections with the same name
// are guarded by the same (invisible) locks. Those with
// no name are also guarded by the same locks, so the programmer
// could really lose parallelism if he/she were not aware of this.
// Certain very specialized one-statement critical sections can be
// handled more simply and efficiently using the "atomic"
// directive, e.g.
// #pragma omp atomic
// y += x;
// Note that that statement can NOT be a block.
......@@ -10,7 +10,6 @@ $ module load GCC/4.9.2
# .. or 2019 modules system
$ module purge
$ module load spartan_2019
$ module load gcc/8.3.0
# Export OMP_NUM_THREADS with the number of threads desired. Note that it is most efficient to have a number of CPUs equal to the number of threads.
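A minimal compile-and-run sketch under these assumptions, using the Dijkstra example above (the file name is a placeholder):
```
# Compile with OpenMP support and match the thread count to the allocated CPUs
gcc -fopenmp -O2 -o dijkstra dijkstra.c
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-4}
./dijkstra
```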
......
......@@ -3,7 +3,7 @@
#SBATCH --ntasks=2
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
......@@ -3,7 +3,7 @@
#SBATCH --ntasks=8
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
......@@ -2,7 +2,7 @@
#SBATCH --ntasks=16
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
......@@ -4,7 +4,7 @@
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
......@@ -4,7 +4,7 @@
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
......@@ -4,7 +4,7 @@
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
......@@ -4,7 +4,7 @@
#SBATCH --time=0-00:10:00
module purge
module load spartan_2019
module load gcc/8.3.0
module load openmpi/3.1.4
......
#!/bin/bash
#SBATCH --time=0:15:00
module purge
module load spartan_2019
module load gcccore/8.3.0
module load perl/5.30.0
perl < metaperl.pl > output.txt
......
......@@ -5,7 +5,7 @@
# Load required modules
module purge
module load spartan_2019
module load foss/2019b
module load python/3.7.4
......
......@@ -3,7 +3,7 @@
# Load required modules
module purge
module load spartan_2019
module load foss/2019b
module load python/3.7.4
......
......@@ -6,7 +6,7 @@
# Load required modules
module purge
module load spartan_2019
module load foss/2019b
module load python/3.7.4
......
#!/bin/bash
#SBATCH --partition=gpgpu
#SBATCH --gres=gpu:4
#SBATCH --account=hpcadmingpgpu
# Use a project ID that has gpgpu access.
module load QuantumESPRESSO/5.4.0-intel-2016.u3
module load CUDA/9.0.176-intel-2017.u2
......@@ -2,5 +2,6 @@
#SBATCH --partition=gpgpu
#SBATCH --gres=gpu:4
#SBATCH --account=hpcadmingpgpu
# Use a project ID that has gpgpu access.
module load QuantumESPRESSO/5.4.0-intel-2016.u3
module load CUDA/9.0.176-intel-2017.u2
......@@ -15,7 +15,6 @@
# Load the environment variables for R
module purge
module load spartan_2019
module load r/4.0.0
# The command to actually run the job
......
......@@ -17,8 +17,7 @@ R version 3.2.1 (2015-06-18) -- "World-Famous Astronaut
> install.packages("snow", repos="http://cran.r-project.org", lib="~/R_libs/")
..
> q();
[lev@spartan ~]$ echo 'R_LIBS_USER="~/R/libs"' > $HOME/.Renviron
echo 'R_LIBS_USER="~/R_libs"' > $HOME/.Renviron
[lev@spartan ~]$ echo 'R_LIBS_USER="~/R_libs"' > $HOME/.Renviron
[lev@spartan ~]$ ls ~/R_libs/
snow
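Once R_LIBS_USER points at ~/R_libs, a quick check that the user library resolves and snow loads might look like this (assuming an r module is loaded):
```
# Show the library search path and confirm snow can be attached
Rscript -e '.libPaths(); library(snow); sessionInfo()'
```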
......
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load foss/2019b
module load samtools/1.9
......
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load gcccore/8.3.0
module load sqlite/3.29.0
......
......@@ -2,7 +2,6 @@
#SBATCH --time=00:15:00
module purge
module load spartan_2019
module load singularity/3.5.3
singularity exec vsoch-hello-world-master.simg echo "Hello from inside my container!" > output.txt
......
......@@ -12,7 +12,6 @@
# Load the environment variables
module purge
module load spartan_2019
module load gcccore/8.3.0
module load tcl/8.6.9
......
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --account hpcadmingpgpu
# Use a project ID that has gpgpu access.
#SBATCH --partition gpgpu
#SBATCH --gres=gpu:p100:4
#SBATCH --time 01:00:00
......