Commit 72479b77 authored by Research Platforms

Initial commit - 18/6/2018

BLAST/dbs
BLAST/rat-ests
FreeSurfer/buckner_data
FreeSurfer/buckner_data-tutorial_subjs.tar.gz
FSL/intro
FSL/preCourse.tar.gz
FSL/fmri
Gaussian/g16
Gaussian/tests
HPCshells/NAMD
NAMD/apoa1
NAMD/NAMD_BENCHMARKS_SPARTAN
NAMD/stmv
Python/minitwitter.csv
Singularity/vsoch-hello-world-master.simg
Please note that this tutorial is not complete yet!
#!/bin/bash
# SBATCH --account=punim0396
# SBATCH --partition=punim0396
#SBATCH --job-name="ANSYS test"
#SBATCH --partition=physical-cx4
#SBATCH --ntasks=1
#SBATCH --time=0-00:10:00
#SBATCH --gres=aa_r+1%aa_r_hpc+12
module load ansys/19.0-intel-2017.u2
ansys19 -b < OscillatingPlate.inp > OscillatingPlate.db
#!/bin/bash
# job name:
#PBS -N ansys_benchmark
# how many cpus?
#PBS -l nodes=1:ppn=2
# How long to run the job? (hours:minutes:seconds)
# You will almost certainly need to change this.
#PBS -l walltime=00:15:00
# Wait for license file
# Please leave this as it is; it makes things a lot safer.
# PBS -W x=gres:aa_r+1
#PBS -W x=GRES:aa_r+1%aa_r_hpc+2
# Environment variables to make it work:
module load ansys/145
cd $PBS_O_WORKDIR
# Launching the job!
NODES=$(cat $PBS_NODEFILE | sort | uniq -c | awk '{print $2 "*" $1}' | tr '\n' ',' | sed 's/,$//')
echo "This job is allocated on $NODES"
cfx5solve -def Benchmark.def -parallel -par-dist $NODES
#!/bin/bash
# ------------------------------------
# ANSYS - CFX FSI script.
# All you need to change is the wall time and the names of the input files,
# and possibly the job name.
# -----------------------------------
# pbs launching script example for ANSYS job:
# job name: --- Change this if you like
#PBS -N ansys_job_01
# How many CPUs? --- Eight is probably the best choice here.
#PBS -l nodes=8
# How long to run the job? (hours:minutes:seconds)
# You will almost certainly need to change this.
#PBS -l walltime=00:30:00
# Name of output file:
# PBS -o ansys_test_job_02.txt
# Wait for license file
# Please leave this as it is; it makes things a lot safer.
# PBS -l software=aa_r+1
#PBS -W x=GRES:aa_r+1%aa_r_hpc+2
# For CFX use only
# PBS -l software=cfx_par_proc+8
# Environment variables to make it work:
module load ansys/145
cd $PBS_O_WORKDIR
# Launching the job!
NODES=$(cat $PBS_NODEFILE | sort | uniq -c | awk '{print $2 "*" $1}' | tr '\n' ',' | sed 's/,$//')
echo "This job is allocated on $NODES"
cfx5solve -def OscillatingPlate.def -ansys-input OscillatingPlate.inp -parallel -par-dist $NODES
Rattus norvegicus
=================
Sequence files and rat ESTs with BLAST
--------------------------------------
1. Get the FASTA nucleic acid (fna) formatted sequence files for Rattus norvegicus
wget ftp://ftp.ncbi.nih.gov/refseq/R_norvegicus/mRNA_Prot/rat.1.rna.fna.gz
gunzip rat.1.rna.fna.gz
formatdb -i rat.1.rna.fna -p F -o T
2. Get the Expressed Sequence Tags (ESTs): short sub-sequences of cDNA used to identify gene transcripts, and used for gene discovery and gene-sequence determination
wget http://mirrors.vbi.vt.edu/mirrors/ftp.ncbi.nih.gov/genomes/Rattus_norvegicus/ARCHIVE/2002/rn_est.gz
gunzip rn_est.gz
3. Run a Slurm script to BLAST the ESTs against the rat sequence database
#!/bin/bash
# Set the partition
#SBATCH -p cloud
# Set the number of processors that will be used.
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
# Set the walltime (10 hrs)
#SBATCH --time=10:00:00
# Load the environment variables
module load BLAST/2.2.26-Linux_x86_64
# Run the job
blastall -i ./rat-ests/rn_est -d ./dbs/rat.1.rna.fna -p blastn -e 0.05 -v 5 -b 5 -T F -m 9 -o rat_blast_tab.txt -a 8
#!/bin/bash
#SBATCH --time=24:00:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
# You might need an external license file
# export LM_LICENSE_FILE=port@licenseserver
module load COMSOL/5.2
# Example batch command from csiro.org.au
comsol batch -inputfile mymodel.mph -outputfile mymodelresult.mph -batchlog mybatch.log -j b1 -np 8 -mpmode owner
#!/bin/bash
#SBATCH --job-name FDS_example_job
# How many nodes/cores? FDS is MPI-enabled and can operate across multiple nodes (see the multi-node sketch after this script)
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#What is the maximum time this job is expected to take? (Walltime)
#Format: Days-Hours:Minutes:Seconds
#SBATCH --time=1-24:00:00
module load FDS
fds inputfile.fds outputfile.fds
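Since FDS is MPI enabled, it can also be run across several nodes. The following is a minimal sketch only, assuming the FDS module provides an MPI-enabled fds binary and reusing inputfile.fds from the script above; adjust the node and task counts to suit your model.
#!/bin/bash
#SBATCH --job-name FDS_multinode_example
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4
#SBATCH --time=1-00:00:00
module load FDS
# srun launches one MPI rank per allocated task across the nodes
srun fds inputfile.fds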
#!/bin/bash
#SBATCH -p cloud
#SBATCH --ntasks=1
#SBATCH -t 0:15:00
module load FSL/5.0.9-centos6_64
# FSL needs to be sourced
source $FSLDIR/etc/fslconf/fsl.sh
srun bet /usr/local/common/FSL/intro/structural.nii.gz test1FSL -f 0.1
#!/bin/bash
#SBATCH -p cloud
#SBATCH --ntasks=8
#SBATCH -t 0:05:00
module load FSL/5.0.9-centos6_64
# FSL needs to be sourced
source $FSLDIR/etc/fslconf/fsl.sh
srun -n 8 bet /usr/local/common/FSL/intro/structural.nii.gz test2FSL -f 0.1
#!/bin/bash
#SBATCH -p cloud
#SBATCH --ntasks=1
#SBATCH -t 00:15:00
module load FreeSurfer/5.3.0-GCC-4.9.2-centos6_x86_64
# Tutorial derived from `https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/GroupAnalysis`
# Export directories
export TUTORIAL_DATA=/usr/local/common/FreeSurfer
export SUBJECTS_DIR=$TUTORIAL_DATA/buckner_data/tutorial_subjs/group_analysis_tutorial
# Create contrast; test the change in thickness with age, after removing the effects of gender.
echo "0 0 0.5 0.5" > lh-Avg-thickness-age-Cor.mtx
# Assemble the precached Data (mris_preproc)
mris_preproc --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd --cache-in thickness.fwhm10.fsaverage --target fsaverage --hemi lh --out lh.gender_age.thickness.10.mgh
# Assemble the data (non-cached)
# mris_preproc --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd --target fsaverage --hemi lh --meas thickness --out $SUBJECTS_DIR/glm/lh.gender_age.thickness.00.mgh
# mri_surf2surf --hemi lh --s fsaverage --sval lh.gender_age.thickness.00.mgh --fwhm 10 --cortex --tval lh.gender_age.thickness.10B.mgh
mri_glmfit --y lh.gender_age.thickness.10.mgh --fsgd $SUBJECTS_DIR/glm/gender_age.fsgd dods --C $SUBJECTS_DIR/glm/lh-Avg-thickness-age-Cor.mtx --surf fsaverage lh --cortex --glmdir lh.gender_age.glmdir
We don't have the complete set of libraries installed (yet) on the compute nodes for FreeSurfer visualisation. In the meantime, short visualisation sessions can be conducted on the login node. As always, it is better to do visualisation locally and computation on the cluster.
The following is an example session for visualisation.
[lev@cricetomys HPCshells]$ ssh spartan -Y
..
[lev@spartan ~]$ module load FreeSurfer/6.0.0-GCC-4.9.2-centos6_x86_64
[lev@spartan ~]$ module load X11/20160819-GCC-4.9.2
[lev@spartan ~]$ export TUTORIAL_DATA=/usr/local/common/FreeSurfer
[lev@spartan ~]$ export SUBJECTS_DIR=$TUTORIAL_DATA/buckner_data/tutorial_subjs
[lev@spartan ~]$ cd $SUBJECTS_DIR
[lev@spartan ~]$ freeview -v \
good_output/mri/T1.mgz \
good_output/mri/wm.mgz \
good_output/mri/brainmask.mgz \
good_output/mri/aseg.mgz:colormap=lut:opacity=0.2 \
-f good_output/surf/lh.white:edgecolor=blue \
good_output/surf/lh.pial:edgecolor=red \
good_output/surf/rh.white:edgecolor=blue \
good_output/surf/rh.pial:edgecolor=red
See more here to continue the tutorial:
https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/OutputData_freeview
# This directory contains sample Slurm scripts for the use of GPUs on Spartan. The main difference between submitting a standard Slurm job and a job that makes use of GPUs is the additional parameters in the Slurm script. A user will need to specify that the GPU partition is being used and, in addition, make a generic resource (GRES) request for the quantity of GPUs required.
#SBATCH --partition gpu
#SBATCH --gres=gpu
# If you have access, one can also select the following instead:
#
#SBATCH --partition gpgpu
# For example, if you wish to access up to four GPUs in a single job, use:
#SBATCH --gres=gpu:4
# Note, however, that this requests any type of GPGPU. We have different GPGPUs installed, so the type may need to be specified.
#
# For example if you submit a job that says `--gres=gpu` for 1 GPU or `--gres=gpu:2` for 2 GPUs per task then that can be satisfied by either type
# but if you need a specific type (say P100) then you need to submit with `--gres=gpu:p100` and if you need 2 per task then you would do `--gres=gpu:p100:2`.
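#
# Putting these directives together, a minimal sketch of a complete GPU job
# script follows. The job name and the my_gpu_app binary are illustrative
# placeholders only; the CUDA module is the one used elsewhere in these
# examples, and the partition and gres values are as described above.
#!/bin/bash
#SBATCH --job-name=gpu_example
#SBATCH --partition=gpgpu
#SBATCH --gres=gpu:p100:2
#SBATCH --ntasks=1
#SBATCH --time=0-01:00:00
module load CUDA/7.5.18-GCC-4.9.2
# Run the GPU-enabled application (placeholder name)
./my_gpu_app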
# Derived from:
# https://stackoverflow.com/questions/7663343/simplest-possible-example-to-show-gpu-outperform-cpu-using-cuda
# "GPUs are high bandwidth, high latency. Trying to get the GPU to beat a CPU for a nanosecond job (or even a millisecond or second job) is
# completely missing the point of doing GPU stuff. Below is some simple code, but to really appreciate the performance benefits of GPU, you'll
# need a big problem size to amortize the startup costs over... otherwise, it's meaningless. I can beat a Ferrari in a two foot race, simply
# because it takes some time to turn the key, start the engine and push the pedal. That doesn't mean I'm faster than the Ferrari in any
# meaningful way."
[lev@spartan ~]$ sinteractive --time=0:30:0
srun: job 1191791 queued and waiting for resources
srun: job 1191791 has been allocated resources
[lev@spartan-rc024 ~]$ module load CUDA/7.5.18-GCC-4.9.2
# Note, must use -std=gnu99 or similar
[lev@spartan-rc024 ~]$ gcc ferrari.c -std=gnu99 -o ferrari
[lev@spartan-rc024 ~]$ time ./ferrari
Enter an index: 33
data[33] = -0.207107
real 0m22.516s
user 0m19.933s
sys 0m0.004s
[lev@spartan ~]$ sinteractive --time=0:30:0 --partition=gpu --gres=gpu:1
srun: job 1191798 queued and waiting for resources
srun: job 1191798 has been allocated resources
[lev@spartan-gpu005 ~]$ nvcc ferrari.cu -o ferrari_gpu
[lev@spartan-gpu005 ~]$ time ./ferrari_gpu
Enter an index: 33
data[33] = 0.000000
real 0m1.112s
user 0m0.001s
sys 0m0.015s
[lev@spartan-gpu005 GPU]$
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main(void)
{
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
c = 0;
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
// Print the result copied back to the host
printf("%d\n", c);
// Cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
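To try the example above, a compile-and-run sketch, assuming the same CUDA module as in the interactive session earlier and that the source has been saved as add.cu (a filename chosen here only for illustration):
nvcc add.cu -o add
./add
# prints 9, i.e. 2 + 7 computed on the device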
#include <stdio.h>
#define N (16*16)
#define M (20)
int main()
{
float data[N];
for(int i = 0; i < N; i++)
{
data[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
{
data[i] = data[i] * data[i] - 0.25f;
}
}
int sel;
printf("Enter an index: ");
scanf("%d", &sel);
printf("data[%d] = %f\n", sel, data[sel]);
}
#include <stdio.h>
#define N (16*16)
#define M (20)
__global__ void cudakernel(float *buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
buf[i] = buf[i] * buf[i] - 0.25f;
}
int main()
{
float data[N];
float *d_data;
cudaMalloc(&d_data, N * sizeof(float));
cudakernel<<<N/32, 32>>>(d_data);
cudaMemcpy(data, d_data, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_data);
int sel;
printf("Enter an index: ");
scanf("%d", &sel);
printf("data[%d] = %f\n", sel, data[sel]);
}
#include <stdio.h>
#define N (512*512)
#define M (10000)
int main()
{
float data[N];
for(int i = 0; i < N; i++)
{
data[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
{
data[i] = data[i] * data[i] - 0.25f;
}
}
int sel;
printf("Enter an index: ");
scanf("%d", &sel);
printf("data[%d] = %f\n", sel, data[sel]);
}
#include <stdio.h>
#define N (512*512)
#define M (10000)
__global__ void cudakernel(float *buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = 1.0f * i / N;
for(int j = 0; j < M; j++)
buf[i] = buf[i] * buf[i] - 0.25f;
}
int main()
{
float data[N];
float *d_data;
cudaMalloc(&d_data, N * sizeof(float));
cudakernel<<<N/256, 256>>>(d_data);
cudaMemcpy(data, d_data, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_data);
int sel;
printf("Enter an index: ");
scanf("%d", &sel);
printf("data[%d] = %f\n", sel, data[sel]);
}
#include <stdio.h>
int main(void)
{
printf("Hello World!\n");
return 0;
}
#include <stdio.h>
__global__ void mykernel(void)
{
}
int main(void)
{
mykernel<<<1,1>>>();
printf("Hello World!\n");
return 0;
}
# This will profile your application using the default CUDA counters. If
# you would like to use different counters, you can specify this with the
# CUDA_PROFILE_CONFIG environment variable, which points to a file
# containing the counters you want to use (limited to 4 per run). You
# can find more information regarding the counters and the config file
# in the NVIDIA CUDA documentation.
#
# Please be aware that the profiler will only sample from 1 GPU; it has
# the same limitations as the NVIDIA Visual Profiler provided in the
# CUDA toolkit.
declare CUDA_PROFILE=1
declare CUDA_PROFILE_CSV=1
declare CUDA_PROFILE_LOG=$HOSTNAME.nvidia.profiler.output
declare -x CUDA_PROFILE
declare -x CUDA_PROFILE_CSV
declare -x CUDA_PROFILE_LOG
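As a sketch of the CUDA_PROFILE_CONFIG usage described above; the counter names below are illustrative examples from the legacy command-line profiler documentation rather than a verified list for this installation, and the config filename is arbitrary.
# List up to 4 counters, one per line, in a config file
cat > cuda_profile.cfg << EOF
gld_request
gst_request
branch
divergent_branch
EOF
declare -x CUDA_PROFILE_CONFIG=$PWD/cuda_profile.cfg
# Then run the GPU binary as usual; results are written to $CUDA_PROFILE_LOG
./ferrari_gpu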
ATOM 1 N LYS 1 24.966 -0.646 22.314 1.00 32.74 1SRN 99
ATOM 2 CA LYS 1 24.121 0.549 22.271 1.00 32.05 1SRN 100
ATOM 3 C LYS 1 24.794 1.733 22.943 1.00 31.16 1SRN 101
ATOM 4 O LYS 1 25.742 1.575 23.764 1.00 31.50 1SRN 102
ATOM 5 CB LYS 1 22.812 0.323 23.047 1.00 33.09 1SRN 103
ATOM 6 CG LYS 1 21.763 1.415 22.695 1.00 34.29 1SRN 104
ATOM 7 CD LYS 1 20.497 1.124 23.561 1.00 34.93 1SRN 105
ATOM 8 CE LYS 1 20.706 1.659 24.970 1.00 35.35 1SRN 106
ATOM 9 NZ LYS 1 21.524 0.759 25.825 1.00 35.85 1SRN 107
ATOM 10 N GLU 2 24.300 2.909 22.632 1.00 29.30 1SRN 108
ATOM 11 CA GLU 2 24.858 4.145 23.207 1.00 27.38 1SRN 109
ATOM 12 C GLU 2 24.567 4.201 24.693 1.00 26.12 1SRN 110
ATOM 13 O GLU 2 23.398 4.051 25.038 1.00 26.39 1SRN 111
ATOM 14 CB GLU 2 24.238 5.355 22.537 1.00 27.12 1SRN 112
ATOM 15 CG GLU 2 24.775 6.731 22.894 1.00 26.16 1SRN 113
ATOM 16 CD GLU 2 24.277 7.798 21.950 1.00 25.53 1SRN 114
ATOM 17 OE1 GLU 2 23.087 7.974 21.734 1.00 25.09 1SRN 115
ATOM 18 OE2 GLU 2 25.200 8.451 21.448 1.00 24.78 1SRN 116
ATOM 19 N THR 3 25.608 4.399 25.499 1.00 24.80 1SRN 117
ATOM 20 CA THR 3 25.475 4.513 26.954 1.00 23.26 1SRN 118
ATOM 21 C THR 3 24.803 5.847 27.263 1.00 22.23 1SRN 119
ATOM 22 O THR 3 24.805 6.756 26.419 1.00 22.26 1SRN 120
ATOM 23 CB THR 3 26.857 4.478 27.708 1.00 23.53 1SRN 121
ATOM 24 OG1 THR 3 27.581 5.698 27.276 1.00 23.39 1SRN 122
ATOM 25 CG2 THR 3 27.750 3.260 27.496 1.00 23.71 1SRN 123
ATOM 26 N ALA 4 24.316 6.023 28.470 1.00 20.81 1SRN 124
ATOM 27 CA ALA 4 23.646 7.264 28.928 1.00 19.56 1SRN 125
ATOM 28 C ALA 4 24.622 8.442 28.958 1.00 18.79 1SRN 126
ATOM 29 O ALA 4 24.267 9.606 28.686 1.00 17.61 1SRN 127
ATOM 30 CB ALA 4 23.015 7.064 30.281 1.00 19.62 1SRN 128
ATOM 31 N ALA 5 25.824 8.089 29.315 1.00 18.22 1SRN 129
ATOM 32 CA ALA 5 26.973 9.001 29.411 1.00 18.17 1SRN 130
ATOM 33 C ALA 5 27.301 9.459 27.996 1.00 18.37 1SRN 131
ATOM 34 O ALA 5 27.487 10.671 27.734 1.00 18.89 1SRN 132
ATOM 35 CB ALA 5 28.136 8.252 30.019 1.00 17.83 1SRN 133
ATOM 36 N ALA 6 27.347 8.474 27.100 1.00 17.91 1SRN 134
ATOM 37 CA ALA 6 27.667 8.723 25.675 1.00 17.62 1SRN 135
ATOM 38 C ALA 6 26.563 9.530 25.053 1.00 17.34 1SRN 136
ATOM 39 O ALA 6 26.910 10.405 24.191 1.00 18.24 1SRN 137
ATOM 40 CB ALA 6 28.009 7.493 24.904 1.00 17.09 1SRN 138
ATOM 41 N LYS 7 25.331 9.253 25.468 1.00 16.74 1SRN 139
ATOM 42 CA LYS 7 24.214 10.046 24.882 1.00 15.77 1SRN 140
ATOM 43 C LYS 7 24.248 11.484 25.368 1.00 14.54 1SRN 141
ATOM 44 O LYS 7 23.864 12.449 24.637 1.00 14.65 1SRN 142
ATOM 45 CB LYS 7 22.873 9.453 25.223 1.00 15.88 1SRN 143
ATOM 46 CG LYS 7 21.741 9.892 24.304 1.00 16.30 1SRN 144
ATOM 47 CD LYS 7 20.430 9.673 25.048 1.00 15.98 1SRN 145
ATOM 48 CE LYS 7 19.195 9.601 24.179 1.00 17.66 1SRN 146
ATOM 49 NZ LYS 7 18.362 8.506 24.926 1.00 18.08 1SRN 147
ATOM 50 N PHE 8 24.611 11.716 26.577 1.00 12.90 1SRN 148
ATOM 51 CA PHE 8 24.684 13.122 27.093 1.00 11.39 1SRN 149
ATOM 52 C PHE 8 25.642 13.925 26.270 1.00 11.71 1SRN 150
ATOM 53 O PHE 8 25.432 15.007 25.725 1.00 11.29 1SRN 151
ATOM 54 CB PHE 8 25.131 13.060 28.561 1.00 9.51 1SRN 152
ATOM 55 CG PHE 8 25.203 14.394 29.198 1.00 8.62 1SRN 153
ATOM 56 CD1 PHE 8 24.126 14.986 29.804 1.00 7.91 1SRN 154
ATOM 57 CD2 PHE 8 26.452 15.039 29.163 1.00 7.74 1SRN 155
ATOM 58 CE1 PHE 8 24.280 16.270 30.378 1.00 8.08 1SRN 156
ATOM 59 CE2 PHE 8 26.616 16.300 29.751 1.00 7.25 1SRN 157
ATOM 60 CZ PHE 8 25.504 16.901 30.351 1.00 6.96 1SRN 158
ATOM 61 N GLU 9 26.898 13.337 26.165 1.00 11.99 1SRN 159
ATOM 62 CA GLU 9 27.881 14.091 25.359 1.00 12.32 1SRN 160
ATOM 63 C GLU 9 27.371 14.464 24.013 1.00 12.06 1SRN 161
ATOM 64 O GLU 9 27.476 15.538 23.451 1.00 12.44 1SRN 162
ATOM 65 CB GLU 9 29.091 13.150 25.107 1.00 12.85 1SRN 163
ATOM 66 CG GLU 9 30.026 13.107 26.317 1.00 15.11 1SRN 164
ATOM 67 CD GLU 9 30.913 11.894 26.266 1.00 15.07 1SRN 165
ATOM 68 OE1 GLU 9 31.790 11.714 27.007 1.00 16.73 1SRN 166
ATOM 69 OE2 GLU 9 30.618 11.126 25.332 1.00 15.20 1SRN 167
ATOM 70 N ARG 10 26.718 13.468 23.337 1.00 12.46 1SRN 168
ATOM 71 CA ARG 10 26.217 13.615 22.008 1.00 12.35 1SRN 169
ATOM 72 C ARG 10 25.181 14.741 21.898 1.00 12.46 1SRN 170
ATOM 73 O ARG 10 25.315 15.571 20.989 1.00 11.22 1SRN 171
ATOM 74 CB ARG 10 25.543 12.364 21.390 1.00 12.36 1SRN 172
ATOM 75 CG ARG 10 25.041 12.649 20.020 1.00 13.12 1SRN 173
ATOM 76 CD ARG 10 24.583 11.429 19.284 1.00 13.43 1SRN 174
ATOM 77 NE ARG 10 23.705 10.574 20.090 1.00 13.83 1SRN 175
ATOM 78 CZ ARG 10 22.391 10.715 20.025 1.00 13.92 1SRN 176
ATOM 79 NH1 ARG 10 21.597 9.973 20.783 1.00 14.58 1SRN 177
ATOM 80 NH2 ARG 10 21.916 11.570 19.124 1.00 14.10 1SRN 178
ATOM 81 N GLN 11 24.193 14.618 22.850 1.00 12.41 1SRN 179