Commit 6412db16 authored by Research Platforms

Update for 20 Nov

parent 053e6f17
@@ -14,5 +14,6 @@ NAMD/apoa1
NAMD/NAMD_BENCHMARKS_SPARTAN
NAMD/stmv
Python/minitwitter.csv
SAMtools/sample.sam.gz
Singularity/vsoch-hello-world-master.simg
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=ABRicate-test.slurm
#SBATCH -p cloud
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load ABRicate/0.8.7-spartan_intel-2017.u2
# The command to actually run the job
abricate ecoli_rel606.fasta
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=ABySS-test.slurm
#SBATCH -p cloud
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load ABySS/2.0.2-goolf-2015a
# Assemble a small synthetic data set
tar xzvf test-data.tar.gz
sleep 20
abyss-pe k=25 name=test in='test-data/reads1.fastq test-data/reads2.fastq'
# Calculate assembly contiguity statistics
abyss-fac test-unitigs.fa
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=ADMIXTURE-test.slurm
#SBATCH -p cloud
# Run with two threads
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load ADMIXTURE/1.3.0
# Untar sample files, run application
# See admixture --help for options.
tar xvf hapmap3-files.tar.gz
admixture -j2 hapmap3.bed 1
2431 NA19916 0 0 1 -9
2424 NA19835 0 0 2 -9
2469 NA20282 0 0 2 -9
2368 NA19703 0 0 1 -9
2425 NA19901 0 0 2 -9
2427 NA19908 0 0 1 -9
2430 NA19914 0 0 2 -9
2470 NA20287 0 0 2 -9
2436 NA19713 0 0 2 -9
2426 NA19904 0 0 1 -9
2431 NA19917 0 0 2 -9
2436 NA19982 0 0 1 -9
2487 NA20340 0 0 1 -9
2427 NA19909 0 0 2 -9
2424 NA19834 0 0 1 -9
2480 NA20317 0 0 2 -9
2418 NA19818 0 0 1 -9
2490 NA20346 0 0 1 -9
2433 NA19921 0 0 2 -9
2469 NA20281 0 0 1 -9
2495 NA20359 0 0 2 -9
2477 NA20301 0 0 2 -9
2492 NA20349 0 0 1 -9
2474 NA20294 0 0 2 -9
2494 NA20357 0 0 2 -9
2425 NA19900 0 0 1 -9
2491 NA20348 0 0 1 -9
2471 NA20289 0 0 2 -9
2489 NA20344 0 0 2 -9
2418 NA19819 0 0 2 -9
2357 NA19625 0 0 2 -9
2483 NA20332 0 0 2 -9
2367 NA19700 0 0 1 -9
2472 NA20291 0 0 1 -9
2488 NA20342 0 0 1 -9
2484 NA20334 0 0 2 -9
2494 NA20356 0 0 1 -9
2371 NA19712 0 0 2 -9
2481 NA20322 0 0 2 -9
2368 NA19704 0 0 2 -9
2367 NA19701 0 0 2 -9
2496 NA20363 0 0 2 -9
2371 NA19711 0 0 1 -9
2446 NA20126 0 0 1 -9
2485 NA20336 0 0 2 -9
2487 NA20341 0 0 2 -9
2437 NA19985 0 0 2 -9
2466 NA20276 0 0 2 -9
2446 NA20127 0 0 2 -9
1328 NA06989 0 0 2 -9
1377 NA11891 0 0 1 -9
1349 NA11843 0 0 1 -9
1330 NA12341 0 0 2 -9
1328 NA06984 0 0 1 -9
1418 NA12275 0 0 2 -9
13291 NA06986 0 0 1 -9
1418 NA12272 0 0 1 -9
13292 NA07051 0 0 1 -9
1354 NA12400 0 0 2 -9
1451 NA12777 0 0 1 -9
1421 NA12287 0 0 2 -9
1353 NA12383 0 0 2 -9
1330 NA12340 0 0 1 -9
1418 NA12273 0 0 2 -9
1377 NA11892 0 0 2 -9
1353 NA12546 0 0 1 -9
1458 NA12843 0 0 2 -9
13281 NA12348 0 0 2 -9
1423 NA11917 0 0 1 -9
1358 NA12718 0 0 2 -9
1421 NA12282 0 0 1 -9
1423 NA11920 0 0 2 -9
1451 NA12776 0 0 2 -9
1421 NA12283 0 0 2 -9
13291 NA07435 0 0 1 -9
1456 NA12828 0 0 2 -9
13291 NA07045 0 0 2 -9
13292 NA07031 0 0 2 -9
1456 NA12827 0 0 1 -9
1330 NA12343 0 0 2 -9
1451 NA12778 0 0 2 -9
1424 NA11930 0 0 1 -9
1463 NA12890 0 0 2 -9
13291 NA07037 0 0 2 -9
1345 NA07347 0 0 1 -9
1456 NA12829 0 0 1 -9
1444 NA12749 0 0 2 -9
1377 NA11894 0 0 2 -9
1421 NA12286 0 0 1 -9
1423 NA11918 0 0 2 -9
1456 NA12830 0 0 2 -9
1377 NA11893 0 0 1 -9
1423 NA11919 0 0 1 -9
1353 NA12489 0 0 2 -9
1354 NA12399 0 0 1 -9
1355 NA12413 0 0 1 -9
1458 NA12842 0 0 1 -9
13281 NA12347 0 0 1 -9
1345 NA07346 0 0 2 -9
1451 NA12775 0 0 1 -9
1463 NA12889 0 0 1 -9
1330 NA12342 0 0 1 -9
1444 NA12748 0 0 1 -9
1424 NA11931 0 0 2 -9
1346 NA12045 0 0 1 -9
1444 NA12750 0 0 1 -9
1350 NA11831 0 0 1 -9
1334 NA12146 0 0 1 -9
1347 NA11882 0 0 2 -9
1340 NA07056 0 0 2 -9
1408 NA12154 0 0 1 -9
1349 NA11839 0 0 1 -9
1459 NA12875 0 0 2 -9
1408 NA12156 0 0 2 -9
1346 NA12044 0 0 2 -9
1362 NA11992 0 0 1 -9
1350 NA11829 0 0 1 -9
1334 NA12239 0 0 2 -9
1447 NA12762 0 0 1 -9
1358 NA12716 0 0 1 -9
1459 NA12874 0 0 1 -9
1447 NA12760 0 0 1 -9
1341 NA06985 0 0 2 -9
1420 NA12003 0 0 1 -9
1340 NA07022 0 0 1 -9
1454 NA12813 0 0 2 -9
1341 NA07055 0 0 2 -9
1344 NA12056 0 0 1 -9
1334 NA12145 0 0 2 -9
1454 NA12814 0 0 1 -9
1420 NA12006 0 0 2 -9
1447 NA12763 0 0 2 -9
1345 NA07357 0 0 1 -9
1334 NA12144 0 0 1 -9
1340 NA07000 0 0 2 -9
1350 NA11832 0 0 2 -9
1349 NA11840 0 0 2 -9
1447 NA12761 0 0 2 -9
1340 NA06994 0 0 1 -9
1362 NA11993 0 0 2 -9
1362 NA11995 0 0 2 -9
1463 NA12891 0 0 1 -9
1444 NA12751 0 0 2 -9
1420 NA12005 0 0 1 -9
1375 NA12234 0 0 2 -9
1345 NA07345 0 0 2 -9
1463 NA12892 0 0 2 -9
1416 NA12248 0 0 1 -9
1459 NA12872 0 0 1 -9
1408 NA12155 0 0 1 -9
1341 NA06993 0 0 1 -9
1350 NA11830 0 0 2 -9
1416 NA12249 0 0 2 -9
1344 NA12057 0 0 2 -9
1454 NA12812 0 0 1 -9
1347 NA11881 0 0 1 -9
1362 NA11994 0 0 1 -9
1459 NA12873 0 0 2 -9
1454 NA12815 0 0 2 -9
1346 NA12043 0 0 1 -9
1375 NA12264 0 0 1 -9
M012 NA19663 0 0 2 -9
M012 NA19664 0 0 1 -9
M016 NA19722 0 0 2 -9
M016 NA19723 0 0 1 -9
M001 NA19649 0 0 1 -9
M002 NA19669 0 0 2 -9
M007 NA19657 0 0 2 -9
M007 NA19658 0 0 1 -9
M015 NA19719 0 0 2 -9
M015 NA19720 0 0 1 -9
M017 NA19726 0 0 1 -9
M023 NA19747 0 0 1 -9
M027 NA19759 0 0 1 -9
M033 NA19773 0 0 2 -9
M035 NA19780 0 0 1 -9
M004 NA19675 0 0 2 -9
M004 NA19676 0 0 1 -9
M005 NA19651 0 0 2 -9
M011 NA19684 0 0 2 -9
M017 NA19725 0 0 2 -9
M026 NA19755 0 0 2 -9
M026 NA19756 0 0 1 -9
M033 NA19774 0 0 1 -9
M034 NA19776 0 0 2 -9
M034 NA19777 0 0 1 -9
M036 NA19783 0 0 1 -9
M008 NA19661 0 0 1 -9
M010 NA19682 0 0 1 -9
M031 NA19771 0 0 1 -9
M035 NA19779 0 0 2 -9
M036 NA19782 0 0 2 -9
M037 NA19788 0 0 2 -9
M008 NA19660 0 0 2 -9
M009 NA19678 0 0 2 -9
M010 NA19681 0 0 2 -9
M023 NA19746 0 0 2 -9
M039 NA19794 0 0 2 -9
M039 NA19795 0 0 1 -9
M006 NA19654 0 0 2 -9
M024 NA19749 0 0 2 -9
M028 NA19761 0 0 2 -9
M028 NA19762 0 0 1 -9
M031 NA19770 0 0 2 -9
M002 NA19670 0 0 1 -9
M014 NA19716 0 0 2 -9
M024 NA19750 0 0 1 -9
M037 NA19789 0 0 1 -9
M011 NA19685 0 0 1 -9
M009 NA19679 0 0 1 -9
M005 NA19652 0 0 1 -9
Y001 NA18488 0 0 2 -9
Y014 NA18519 0 0 1 -9
Y039 NA19185 0 0 2 -9
Y075 NA19146 0 0 1 -9
Y073 NA19149 0 0 2 -9
Y092 NA19256 0 0 1 -9
Y006 NA19108 0 0 2 -9
Y038 NA19178 0 0 1 -9
Y030 NA18916 0 0 2 -9
Y111 NA19190 0 0 2 -9
Y061 NA19121 0 0 1 -9
Y079 NA19113 0 0 1 -9
Y116 NA19235 0 0 2 -9
Y041 NA19095 0 0 2 -9
Y041 NA19096 0 0 1 -9
Y007 NA18868 0 0 1 -9
Y027 NA18909 0 0 2 -9
Y036 NA18933 0 0 2 -9
Y044 NA19175 0 0 1 -9
Y120 NA19247 0 0 2 -9
Y002 NA18489 0 0 2 -9
Y052 NA19181 0 0 1 -9
Y092 NA19257 0 0 2 -9
Y100 NA19117 0 0 1 -9
Y110 NA19214 0 0 2 -9
Y014 NA18520 0 0 2 -9
Y110 NA19213 0 0 1 -9
Y035 NA19197 0 0 2 -9
Y035 NA19198 0 0 1 -9
Y061 NA19122 0 0 2 -9
Y036 NA18934 0 0 1 -9
Y033 NA18924 0 0 2 -9
Y075 NA19147 0 0 2 -9
Y001 NA18486 0 0 1 -9
Y003 NA18499 0 0 2 -9
Y002 NA18487 0 0 1 -9
Y120 NA19248 0 0 1 -9
Y030 NA18917 0 0 1 -9
Y079 NA19114 0 0 2 -9
Y039 NA19184 0 0 1 -9
Y044 NA19176 0 0 2 -9
Y007 NA18867 0 0 2 -9
Y116 NA19236 0 0 1 -9
Y019 NA18874 0 0 1 -9
Y038 NA19179 0 0 2 -9
Y052 NA19182 0 0 2 -9
Y057 NA19226 0 0 1 -9
Y057 NA19225 0 0 2 -9
Y111 NA19189 0 0 1 -9
Y003 NA18498 0 0 1 -9
Y100 NA19118 0 0 2 -9
Y006 NA19107 0 0 1 -9
Y010 NA18510 0 0 1 -9
Y010 NA18511 0 0 2 -9
Y033 NA18923 0 0 1 -9
Y073 NA19150 0 0 1 -9
Y027 NA18910 0 0 1 -9
Y019 NA18873 0 0 2 -9
Y005 NA18505 0 0 2 -9
Y117 NA19239 0 0 1 -9
Y004 NA18501 0 0 1 -9
Y043 NA19137 0 0 2 -9
Y072 NA19153 0 0 1 -9
Y058 NA19223 0 0 1 -9
Y024 NA18861 0 0 2 -9
Y045 NA19201 0 0 2 -9
Y074 NA19144 0 0 1 -9
Y112 NA19193 0 0 2 -9
Y005 NA18504 0 0 1 -9
Y048 NA19203 0 0 1 -9
Y074 NA19143 0 0 2 -9
Y017 NA18871 0 0 1 -9
Y017 NA18870 0 0 2 -9
Y045 NA19200 0 0 1 -9
Y013 NA18517 0 0 2 -9
Y023 NA18855 0 0 2 -9
Y050 NA19209 0 0 2 -9
Y056 NA19160 0 0 1 -9
Y072 NA19152 0 0 2 -9
Y042 NA19102 0 0 2 -9
Y047 NA19172 0 0 2 -9
Y117 NA19238 0 0 2 -9
Y028 NA18913 0 0 1 -9
Y028 NA18912 0 0 2 -9
Y058 NA19222 0 0 2 -9
Y009 NA18508 0 0 2 -9
Y043 NA19138 0 0 1 -9
Y018 NA18852 0 0 2 -9
Y050 NA19210 0 0 1 -9
Y048 NA19204 0 0 2 -9
Y009 NA18507 0 0 1 -9
Y056 NA19159 0 0 2 -9
Y013 NA18516 0 0 1 -9
Y024 NA18862 0 0 1 -9
Y042 NA19101 0 0 1 -9
Y112 NA19192 0 0 1 -9
Y012 NA18858 0 0 2 -9
Y071 NA19141 0 0 1 -9
Y040 NA19093 0 0 2 -9
Y051 NA19206 0 0 2 -9
Y105 NA19098 0 0 1 -9
Y101 NA19130 0 0 1 -9
Y077 NA19128 0 0 1 -9
Y101 NA19131 0 0 2 -9
Y060 NA19116 0 0 2 -9
Y077 NA19127 0 0 2 -9
Y018 NA18853 0 0 1 -9
Y047 NA19171 0 0 1 -9
Y071 NA19140 0 0 2 -9
Y012 NA18859 0 0 1 -9
Y060 NA19119 0 0 1 -9
Y051 NA19207 0 0 1 -9
Y105 NA19099 0 0 2 -9
#!/bin/tcsh
# ---------------------------------------------------------
# if the afni directory is already here, whine a bit and terminate the script
if ( -e ARzs_data/afni ) then
echo ""
echo "Failure:"
echo ""
echo "The resulting afni directory already exists. Please move the"
echo "afni directory and then re-run the script. Example:"
echo ""
echo " mv ARzs_data/afni ARzs_data/afni.bak"
echo ""
exit
endif
# ---------------------------------------------------------
# start in the top level data directory
cd ARzs_data
# ---------------------------------------------------------
# create AFNI dataset from spgr I-files - will move them to afni dir later
to3d -prefix ARzs_spgr -spgr SPGR_data/I.*
# ---------------------------------------------------------
# create AFNI datasets from both EPI runs
cd EPI_data
Ifile -nt '???/I.*'
# change the prefix for the output file in GERT_Reco
sed 's/OutBrick/ARzs_EPI/' GERT_Reco > new_GERT_Reco
# now run the new_GERT_Reco script
tcsh new_GERT_Reco
# -------------------------
# store outliers in a subdir
cd afni
mkdir outliers
mv ARzs_EPI*Outliers.1D outliers
cd ..
# -------------------------
# put the new afni directory back at the top level
mv afni ..
# go back to top level data directory
cd ..
# ---------------------------------------------------------
# put our spgr anat dataset into that new afni directory
mv ARzs_spgr+orig.* afni
# ---------------------------------------------------------
# do volume registering
cd afni
3dvolreg -prefix ARzs_EPI_r1_vr -base ARzs_EPI_r2+orig'[155]' ARzs_EPI_r1+orig
3dvolreg -prefix ARzs_EPI_r2_vr -base ARzs_EPI_r2+orig'[155]' ARzs_EPI_r2+orig
# average the two volume registered time series datasets
3dcalc -a ARzs_EPI_r1_vr+orig -b ARzs_EPI_r2_vr+orig \
-expr '(a+b)/2' -prefix ARzs_EPI_avg
# ---------------------------------------------------------
# create 1D file (Ref_ARzs.1D) containing ideal reference function
waver -GAM -dt 2 -numout 160 \
-inline 15@0 \
15@1 7@0 15@1 8@0 \
15@1 7@0 15@1 8@0 \
15@1 7@0 15@1 8@0 \
10@0 \
> Ref_ARzs.1D
# gasp! actual statistical analysis of data! ...
3ddelay -input ARzs_EPI_avg+orig -ideal_file Ref_ARzs.1D -fs 0.5 -T 45
# ---------------------------------------------------------
# extra stuff
3dIntracranial -anat ARzs_spgr+orig -prefix ARzs_spgr_NoSkl
echo "results are left in directory: ARzs_data/afni"
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=AFNI-test.slurm
#SBATCH -p cloud
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load AFNI/linux_openmp_64-spartan_intel-2017.u2-20190219
# Untar dataset and run script
tar xvf ARzs_data.tgz
./@ARzs_analyze
HowTo #1:
Experiment Background
Block design - measurement of time delay to visual stimulus
The Experiment:
In this example, subjects were presented with a checkered, rotating
hemifield. One run consisted of 30 seconds of rest, followed by 6
pairs of 30 seconds ON (rotating hemifield) and 15 seconds OFF (rest)
intervals, and lastly followed by 20 seconds of rest at the end of the
run. Thus each run was 320 seconds long, with a TR of 2 seconds, providing
160 volume images.
The data:
This EPI data consists of 160 volumes, 18 coronal slices each. These
slice images were taken of the back of the brain (28.8 mm posterior to
96.8 mm posterior). Each slice is a 64 by 64 voxel image, with a voxel
resolution of 3.75 mm on each side. The thickness of each of the 18
slices is 4.0 mm.
The SPGR anatomical data consists of 124 axial slices, from 50.6 mm
inferior to 84.7 mm superior. Each slice is a 256 by 256 voxel image,
with each voxel being a 0.938 mm square. The slice thickness (the
distance between 2 slices) is 1.0 mm for the anatomical images.
Note the difference in resolution between the SPGR data and the EPI
data. The EPI data has a much lower resolution. A single voxel of
the EPI data is basically 4 times the size of a single voxel of SPGR
data, along each of the 3 axes. A single voxel of EPI data will therefore
occupy the same space as approximately 64 voxels of SPGR data.
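(To make the arithmetic explicit: in-plane, 3.75 mm / 0.938 mm is roughly 4;
through-plane, 4.0 mm / 1.0 mm = 4; so one EPI voxel spans about 4 x 4 x 4 = 64
SPGR voxels.)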
The SPGR data is located under the SPGR_data directory, in 124 I-files
(i.e., I.001 through I.124).
The EPI data is under the EPI_data directory, in a sequence of directories
generated by a GE scanner: 003, 023, 043, 063, 083 and 103. This is
slightly more than 2 runs of data contained in almost 6000 I-files.
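(2 runs x 160 volumes x 18 slices = 5760 slice images, hence "almost 6000" I-files.)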
Background:
Because of the retinotopic organization of some visual cortical areas,
stimuli in different parts of the visual field stimulate different parts of
the visual cortex. To determine the mapping between the visual field and
retinotopic areas, we use a visual stimulus that repeatedly sweeps through
the visual field in 30 seconds, every 45 seconds. This causes different
portions of retinotopic areas to be activated at different times depending
on the portion of the visual field that they represent. For example, if a
stimulus starts as a small annulus in the middle of the visual field
(foveal) and expands to the peripheral part of the visual field 10 seconds
later, then we expect cortical areas mapped to the peripheral part of the
visual field to respond with a 10 second delay relative to areas mapped to
the foveal part. Thus by estimating the response delay of the activated
voxels we can determine which part of the visual field they respond to.
We predict that different portions of the visual cortex will respond with
different delays depending on what portions of the visual field they
respond to.
This is a simplified exposition of retinotopic analysis. For more details
see: [1-3]. Estimating the response delay of activated voxels is done
using the program 3ddelay (or the AFNI plugin Hilbert Delay 98).
The program implements a computationally efficient way of estimating the
response delay of each voxel along with its cross-correlation coefficient
which is used to determine whether a voxel is activated by the stimulus or
not. The response delay is estimated using the Hilbert Transform of the
cross correlation function. For nauseating details about FMRI response
delays and delay estimation algorithm see [4, 5].
References:
1. Engel, S.A., et al., fMRI of human visual cortex. Nature, 1994.
369(6481): p. 525.
2. Sereno, M.I., et al., Borders of multiple visual areas in humans
revealed by functional MRI. Science, 1995. 268(268): p. 889-893.
3. DeYoe, E.A., et al., Functional magnetic resonance imaging (FMRI) of
the human brain. J. Neuroscience Methods, 1994. 54: p. 171-187.
4. Saad, Z.S., E.A. DeYoe, and K.M. Ropella, Estimation of FMRI Response
Delays. Neuroimage, 2002: p. in press.
5. Saad, Z.S., et al., Analysis and use of FMRI response delays. Human
Brain Mapping, 2001. 13(2): p. 74-93.
-------------------------------
-------------------------------
Steps taken to analyze the data:
We have created and run a shell script (called @ARzs_analyze) which
performs the following steps.
o Put both the SPGR data and the EPI data into AFNI bricks.
- The SPGR data is all under the SPGR_data directory, so this can easily
be put into an AFNI brick, using the 'to3d' program.
- The EPI data is more confusing. Although 'to3d' alone works well, we
will use the 'Ifile' program to create a Unix script called
'GERT_Reco'. Simply running this script will create the AFNI bricks
(the script will call 'to3d' with the relevant parameters).
o All of our EPI volumes (160 volumes in each of 2 runs) are
registered with the volume in run 2, time point 156 (sub-brick
155). This is done to correct for small head movements during the
scanning session. The program '3dvolreg' is used for this step.
o The two EPI run datasets are averaged using '3dcalc'.
o A reference file was created using the program 'waver'. This file
represents an ideal response function over the course of the
run. This file will be needed in the next step as an input to the
program '3ddelay'.
o Here we actually create a functional brick. The program '3ddelay'
is used to compute, among other things, a delay sub-brick and a
correlation sub-brick. The delay sub-brick contains the data we
are actually after (the time lag between the ideal response
function and the measured response function at each voxel). The
correlation sub-brick shows the correlation between the measured
data for each voxel and the ideal time series. This might suggest
those voxels for which the delay is relevant.
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BEDTools-test.slurm
#SBATCH -p cloud
# Run on single CPU
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load BEDTools/2.28.0-spartan_intel-2017.u2
# BEDTools has an extensive test suite, but the tests assume the wrong
# location for the application!
# So all these tests need to be modified to include:
# BT=$(which bedtools)
cp -r /usr/local/easybuild/software/BEDTools/2.27.1-intel-2017.u2/test/* .
find ./ -type f -exec sed -i -e 's/${BT-..\/..\/bin\/bedtools}/$(which bedtools)/g' {} \;
sh test.sh
# Specific example commands available here:
# https://bedtools.readthedocs.io/en/latest/content/example-usage.html#bedtools-intersect
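# As a minimal illustration of the kind of command documented there (the BED
# file names are placeholders, not files provided with this test):
# bedtools intersect -a regions_A.bed -b regions_B.bed > overlaps.bed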
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=BEAST-test.slurm
#SBATCH -p cloud
# Run on 4 cores
#SBATCH --ntasks=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load Beast/2.3.1-intel-2016.u3
beast Dengue4.env.xml
Operator analysis
Operator Tuning Count Time Time/Op Pr(accept) Performance suggestion
scale(kappa) 0.4 12653 767 0.06 0.2756 good
frequencies 0.05 12863 730 0.06 0.2756 good
scale(clock.rate) 0.692 379650 22778 0.06 0.2436 good
subtreeSlide(treeModel) 7.834 1899758 48423 0.03 0.1733 good
Narrow Exchange(treeModel) 1899107 45712 0.02 0.1961 good
Wide Exchange(treeModel) 378915 3874 0.01 0.016 good
wilsonBalding(treeModel) 380141 11903 0.03 0.0161 slightly low
scale(treeModel.rootHeight) 0.744 379414 6967 0.02 0.234 good
uniform(nodeHeights(treeModel)) 3797330 134012 0.04 0.4291 slightly high
scale(constant.popSize) 0.31 379929 1822 0.0 0.2584 good
up:clock.rate down:nodeHeights(treeModel) 0.905 380240 15696 0.04 0.2334 slightly high Try setting scaleFactor to about 0.9051
&INFO
isolated hydrogen molecule.
single point calculation.
&END
&CPMD
OPTIMIZE WAVEFUNCTION
CONVERGENCE ORBITALS
1.0d-7
&END
&SYSTEM
SYMMETRY
1
ANGSTROM
CELL
8.00 1.0 1.0 0.0 0.0 0.0
CUTOFF
70.0
&END
&DFT
FUNCTIONAL LDA
&END
&ATOMS
*H_MT_LDA.psp
LMAX=S
2
4.371 4.000 4.000
3.629 4.000 4.000
&END
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=CPMD-test.slurm
#SBATCH -p cloud
# Run on two cores
#SBATCH --ntasks=2
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load CPMD/4.3-intel-2018.u4
# Example taken from Axel Kohlmeyer's classic tutorial
# http://www.theochem.ruhr-uni-bochum.de/~legacy.akohlmey/cpmd-tutor/index.html
mpiexec -np 2 cpmd.x 1-h2-wave.inp > 1-h2-wave.out
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=GAMESS-test.slurm
#SBATCH -p cloud
# Run on 1 core
#SBATCH --ntasks=1
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 0:15:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# GAMESS likes memory!
#SBATCH --mem=64G
# Load the environment variables
module load GAMESS-US/20160708-GCC-4.9.2
rungms exam01.inp
GAMESS often requires careful management of memory; quantum chemistry applications in particular are memory-bound.
In the $SYSTEM group of the input file, you may need to set MWORDS and MEMDDI.
MWORDS is the maximum replicated memory which a job can use on every core, given in units of 1,000,000 words (a word is 64 bits, i.e. 8 bytes).
MEMDDI is the total memory needed for the distributed data interface (DDI) storage, given in units of 1,000,000 words. The memory required on each processor core for a run using n cores is MEMDDI/n + MWORDS.
See:
http://www.msg.ameslab.gov/gamess/GAMESS_Manual/input.pdf
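As a rough sizing sketch (the MWORDS, MEMDDI and core-count values below are
hypothetical, not taken from exam01.inp), the per-core memory request can be
estimated in the submission script:
ncores=4
mwords=10      # MWORDS: replicated memory per core, in millions of 8-byte words
memddi=200     # MEMDDI: total DDI memory, in millions of 8-byte words
# Per-core requirement is MEMDDI/n + MWORDS; one million words is 8 MB
percore_mwords=$(( memddi / ncores + mwords ))   # 200/4 + 10 = 60
percore_mb=$(( percore_mwords * 8 ))             # about 480 MB per core
echo "Request at least ${percore_mb} MB per core, e.g. #SBATCH --mem-per-cpu=${percore_mb}"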
! EXAM01.
!
! 1-A-1 CH2 RHF geometry optimization using Firefly.
!
! Although internal coordinates are used (COORD=ZMAT),
! the optimization is done in Cartesian space (NZVAR=0).
! This run uses a criterion (OPTTOL) on the gradient
! which is tighter than is normally used.
!
! This job tests the sp integral module, the RHF module,
! and the geometry optimization module.
!
! Using the default search METHOD=GDIIS,
! FINAL ENERGY IS -37.2322678094 AFTER 8 ITERS RMS GRADIENT = 0.0264308
! FINAL ENERGY IS -37.2308175559 AFTER 7 ITERS RMS GRADIENT = 0.0320880
! FINAL ENERGY IS -37.2376451083 AFTER 7 ITERS RMS GRADIENT = 0.0046682
! FINAL ENERGY IS -37.2379963822 AFTER 8 ITERS RMS GRADIENT = 0.0016453
! FINAL ENERGY IS -37.2380396086 AFTER 8 ITERS RMS GRADIENT = 0.0001526
! FINAL ENERGY IS -37.2380397693 AFTER 5 ITERS RMS GRADIENT = 0.0000026
!
$CONTRL SCFTYP=RHF RUNTYP=OPTIMIZE COORD=ZMT NZVAR=0 $END
$SYSTEM TIMLIM=2 MEMORY=580000 $END
$STATPT OPTTOL=1.0E-5 $END
$BASIS GBASIS=STO NGAUSS=2 $END
$GUESS GUESS=HUCKEL $END
$DATA
Methylene...1-A-1 state...RHF/STO-2G
Cnv 2
C
H 1 rCH
H 1 rCH 2 aHCH
rCH=1.09
aHCH=110.0
$END
#!/bin/bash
# To give your job a name, replace "MyJob" with an appropriate name
#SBATCH --job-name=JAGS-test.slurm
#SBATCH -p cloud
# Run on four CPUs
#SBATCH --ntasks=4
# set your minimum acceptable walltime=days-hours:minutes:seconds
#SBATCH -t 1:00:00
# Specify your email address to be notified of progress.
# SBATCH --mail-user=youremailaddress@unimelb.edu.au
# SBATCH --mail-type=ALL
# Load the environment variables
module load JAGS/4.3.0-intel-2017.u2
# Extract the classic BUGS examples
tar xzvf classic-bugs.tar.gz
sleep 240
cd classic-bugs/vol1
make -j4 check
cd ../vol2
make -j4 check
library(rjags, quietly=TRUE)
check.fun <- function() {
    checkstats <- summary(x)$statistics
    if (is.matrix(benchstats)) {
        # Several monitored nodes: align rows by name, then express the
        # difference in posterior means in units of the benchmark SD
        rnb <- order(rownames(benchstats))
        rnc <- order(rownames(checkstats))
        z <- (benchstats[rnb,1] - checkstats[rnc,1])
        z <- ifelse(z==0, 0, z/benchstats[rnb,2])
    } else {
        # Single monitored node
        z <- (benchstats[1] - checkstats[1])
        z <- ifelse(z==0, 0, z/benchstats[2])
    }
    print(z)
    # Fail if any posterior mean is more than 0.15 benchmark SDs from the benchmark mean
    if (any(abs(z) > 0.15)) {
        stop("FAIL")
        quit(save="no", status=1)
    } else {
        cat("OK\n")
    }
}
check.data <- function(m, data, skip)
{
mdata <- m$data()
if (!missing(skip)) {
mdata[skip] <- NULL
data[skip] <- NULL
}
ok <- isTRUE(all.equal(mdata[sort(names(mdata))], data[sort(names(data))]))
if (!ok) stop("Data mismatch")
}
library(coda)
x <- read.openbugs(quiet=TRUE)
benchstats <- summary(x)$statistics
dump("benchstats", file=benchfile)
library(coda, quietly=TRUE)
x <- read.openbugs(quiet=TRUE)
checkstats <- summary(x)$statistics
if (is.matrix(benchstats)) {
rnb <- order(rownames(benchstats))
rnc <- order(rownames(checkstats))
z <- (benchstats[rnb,1] - checkstats[rnc,1])
z <- ifelse(z==0, 0, z/benchstats[rnb,2])
} else {
z <- (benchstats[1] - checkstats[1])
z <- ifelse(z==0, 0, z/benchstats[2])
}
print(z)
q(status=(any(abs(z) > 0.15)))
Classic BUGS examples
---------------------
Some of the BUGS examples have been modified to run with JAGS, and have
been turned into a test suite.
Each subdirectory contains a number of JAGS script files named test1.cmd,
test2.cmd, ... etc. designed to run the examples as they were presented
in the classic BUGS examples book.
There are also R script files test1.R, test2.R ... to run the
examples using the rjags interface from JAGS to R.
Some of the examples have been modified. In such cases, there is a
ReadMe file explaining the changes. Some examples cannot be run at all,
and these are not included in the testing process.
Make targets
------------
The test suite uses GNU make. To run the tests, change directory into
either "vol1" or "vol2" and invoke make with one of the following targets.
make check Will run all the examples and check the results against
the benchmark run. The check ensures that the posterior
mean of the monitored nodes is not more than 0.15 standard
deviations away from the benchmark mean.
make Rcheck Runs examples using the rjags interface.
make clean Cleans up any files that may have been created during
make check, or during debugging
make bench Creates new benchmark results
make distclean Deletes the benchmark results
In order to run make for a specific example, use the variable "EXAMPLES":
make check EXAMPLES=line
Debugging an example
--------------------
If "make check" fails for example foo, then make will report an error in
make target "check_foo". The log file "check.log" in subdirectory "foo"
can be inspected to see what went wrong. This log file includes the
output produced by JAGS when running the model and by R when checking
the output. Similarly, "make Rcheck" expands to targets of the form
"Rcheck_foo" and the log file is "Rcheck.log".
Note that the tests in "make check" and "make Rcheck" are not deterministic and
will occasionally fail just by chance. You may run "make check" again
when this happens to see if the error is consistently reproduced.
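For instance, if the "line" example fails during "make check", its log can be
inspected with:
less line/check.log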
Re-running the examples
-----------------------
Once all the tests have been successfully completed in a sub-directory,
an empty marker file will be created (check.OK or Rcheck.OK) and the
corresponding checks will be skipped. To re-run the checks, first use
"make clean" to remove the marker files.
Parallel make
-------------
The examples in separate directories can be run in parallel for increased
efficiency. Use the "-j" argument to run examples in parallel, e.g.
make -j4 check
will run 4 parallel make jobs.
## By default we run examples in all sub-directories. This can be
## overridden by the user-specified variable "EXAMPLES"
EXAMPLES ?= blocker bones dyes equiv inhaler leuk line litters mice pump \
rats salm seeds oxford lsat
export JAGS ?= /usr/local/bin/jags
export TIME ?= /usr/bin/time
## Expand example names to make targets
bench_EX=$(addprefix bench_,$(EXAMPLES))
check_EX=$(addprefix check_,$(EXAMPLES))
Rcheck_EX=$(addprefix Rcheck_,$(EXAMPLES))
clean_EX=$(addprefix clean_,$(EXAMPLES))
distclean_EX=$(addprefix distclean_,$(EXAMPLES))
checktime_EX=$(addprefix checktime_,$(EXAMPLES))
## All make targets are phony: i.e. they do not correspond to file
## names
.PHONY: all bench check checktime Rcheck clean distclean \
$(bench_EX) $(check_EX) $(checktime_EX) $(Rcheck_EX) $(clean_EX) $(distclean_EX)
## Make targets
all:
bench: $(bench_EX)
check: $(check_EX)
Rcheck: $(Rcheck_EX)
clean: $(clean_EX)
distclean: $(distclean_EX)
checktime: $(checktime_EX)
## Dispatch targets to subdirectories. They can be run in parallel
$(bench_EX):
$(MAKE) -f $(CURDIR)/Makefile.sub -C $(patsubst bench_%,%,$@) bench
$(check_EX):
$(MAKE) -f $(CURDIR)/Makefile.sub -C $(patsubst check_%,%,$@) check
$(Rcheck_EX):
$(MAKE) -f $(CURDIR)/Makefile.sub -C $(patsubst Rcheck_%,%,$@) Rcheck
$(clean_EX):
$(MAKE) -f $(CURDIR)/Makefile.sub -C $(patsubst clean_%,%,$@) clean
$(distclean_EX):
$(MAKE) -f $(CURDIR)/Makefile.sub -C $(patsubst distclean_%,%,$@) distclean
$(checktime_EX):
$(MAKE) -f $(CURDIR)/Makefile.sub -C $(patsubst checktime_%,%,$@) checktime
## -*-Makefile-*-
## Common makefile to be run in each subdirectory
.PHONY: bench check checktime Rcheck clean distclean
bench: test1.cmd
@for cmdfile in `ls test*.cmd`; do \
rm -f bench.log; \
$(JAGS) $${cmdfile} >> bench.log 2>> bench.log && \
Rscript -e "benchfile <- 'bench-$${cmdfile%%.cmd}.R'; source('../../R/bench.R')" >> bench.log 2>> bench.log || exit 1; \
done;
check: check.OK
check.OK: test1.cmd
@for cmdfile in `ls test*.cmd`; do \
rm -f check.log check.OK; \
$(JAGS) $${cmdfile} >> check.log 2>> check.log && \
Rscript -e "source('bench-$${cmdfile%%.cmd}.R');source('../../R/check.R')" >> check.log 2>> check.log && \
touch check.OK || exit 1; \
done;
checktime: checktime.OK
checktime.OK: test1.cmd
@for cmdfile in `ls test*.cmd`; do \
rm -f checktime.OK; \
$(TIME) -f "$${d} $${cmdfile} %e %U %S" -o times -a \
$(JAGS) $${cmdfile} >> /dev/null 2>> /dev/null && \
touch checktime.OK || exit 1; \
done;
Rcheck: Rcheck.OK
Rcheck.OK: test1.R
@for cmdfile in `ls test*.R`; do \
rm -f Rcheck.log Rcheck.OK; \
Rscript $${cmdfile} >> Rcheck.log 2>> Rcheck.log && \
touch Rcheck.OK || exit 1; \
done;
clean:
@rm -f core jags.dump CODAchain*.txt CODAindex.txt gmon.out check.log Rcheck.log Rcheck.OK check.OK checktime.OK
distclean: clean
@rm -f bench-*.R
There are no benchmarks for blockert.bug and blockht.bug because
of the difficulty in getting stable estimates of the posterior
variance for d.
"benchstats" <-
structure(c(-0.253224633406667, -0.254799456117667, 0.115400764300000,
0.0597799345822366, 0.143834428983032, 0.0683470040191467, 0.00109142728856247,
0.00262604537666272, 0.00124783986130610, 0.00144641385932831,
0.00275380793262567, 0.00204171850594970), .Dim = as.integer(c(3,
4)), .Dimnames = list(c("d", "delta.new", "sigma"), c("Mean",
"SD", "Naive SE", "Time-series SE")))
"benchstats" <-
structure(c(-0.23204102101, -0.263648382132, 0.185695176100,
0.104203323304632, 0.566292620568805, 0.119054449455133, 0.00329519841401541,
0.0179077450314294, 0.00376483225855613, 0.00340734497609960,
0.0176326420872072, 0.0039840930701565), .Dim = as.integer(c(3,
4)), .Dimnames = list(c("d", "delta.new", "sigma"), c("Mean",
"SD", "Naive SE", "Time-series SE")))
"benchstats" <-
structure(c(-0.240132002598000, -0.245208930790601, 0.73370635672,
0.257287216245156, 1.17803384757472, 0.872850799145309, 0.00363859070639119,
0.0166599144417473, 0.0123439743807949, 0.00431558173652028,
0.0170285034618671, 0.0152254432668114), .Dim = as.integer(c(3,
4)), .Dimnames = list(c("d", "delta.new", "sigma"), c("Mean",
"SD", "Naive SE", "Time-series SE")))
"rt" <-
c(3, 7, 5, 102, 28, 4, 98, 60, 25, 138, 64, 45, 9, 57, 25, 33,
28, 8, 6, 32, 27, 22)
"nt" <-
c(38, 114, 69, 1533, 355, 59, 945, 632, 278, 1916, 873, 263,
291, 858, 154, 207, 251, 151, 174, 209, 391, 680)
"rc" <-
c(3, 14, 11, 127, 27, 6, 152, 48, 37, 188, 52, 47, 16, 45, 31,
38, 12, 6, 3, 40, 43, 39)
"nc" <-
c(39, 116, 93, 1520, 365, 52, 939, 471, 282, 1921, 583, 266,
293, 883, 147, 213, 122, 154, 134, 218, 364, 674)
"Num" <-
22
"d" <-
0
"delta.new" <-
0
"tau" <-
1
"mu" <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0)
"delta" <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0)
model {
for (i in 1:Num) {
rt[i] ~ dbin(pt[i], nt[i]);
rc[i] ~ dbin(pc[i], nc[i]);
ld.rt[i] <- logdensity.bin(rt[i], pt[i], nt[i]);
ld.rc[i] <- logdensity.bin(rc[i], pc[i], nc[i]);
logit(pc[i]) <- mu[i]
logit(pt[i]) <- mu[i] + delta[i];
delta[i] ~ dnorm(d, tau);
mu[i] ~ dnorm(0.0, 1.0E-5);
}
d ~ dnorm(0.0, 1.0E-6);
tau ~ dgamma(1.0E-3, 1.0E-3);
delta.new ~ dnorm(d,tau);
sigma <- 1/sqrt(tau);
}
"d" <-
0
"tau" <-
1
"mu" <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0)
"delta" <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0)
model {
for (i in 1:Num) {
rt[i] ~ dbin(pt[i], nt[i]);
rc[i] ~ dbin(pc[i], nc[i]);
logit(pc[i]) <- mu[i];
logit(pt[i]) <- mu[i] + delta[i];
delta[i] ~ dt(d, tau, 4);
mu[i] ~ dnorm(0.0, 1.0E-5);
}
d ~ dnorm(0.0, 1.0E-6);
delta.new ~ dt(d, tau, 4);
tau ~ dgamma(1.0E-3, 1.0E-3);
sigma <- sqrt(1/(tau*2)); # SD of t distribution on 4 degrees of freedom
}
"rt" <-
c(3, 7, 5, 102, 28, 4, 98, 60, 25, 138, 64, 45, 9, 57, 25, 33,
28, 8, 6, 32, 27, 22)
"nt" <-
c(38, 114, 69, 1533, 355, 59, 945, 632, 278, 1916, 873, 263,
291, 858, 154, 207, 251, 151, 174, 209, 391, 680)
"rc" <-
c(3, 14, 11, 127, 27, 6, 152, 48, 37, 188, 52, 47, 16, 45, 31,
38, 12, 6, 3, 40, 43, 39)
"nc" <-
c(39, 116, 93, 1520, 365, 52, 939, 471, 282, 1921, 583, 266,
293, 883, 147, 213, 122, 154, 134, 218, 364, 674)
"Num" <-
22
"Nbins" <-
10
"eta" <-
c(2,4,6,8,10,12,15,20,30,50)
model {
for (i in 1:Num) {
rt[i] ~ dbin(pt[i],nt[i]);
rc[i] ~ dbin(pc[i],nc[i]);
logit(pc[i]) <- mu[i];
logit(pt[i]) <- mu[i] + delta[i];
delta[i] ~ dt(d,tau,v);
mu[i] ~ dnorm(0.0, 1.0E-5);
}
delta.new ~ dt(d,tau,v);
d ~ dnorm(0.0,1.0E-6);
tau ~ dgamma(1.0E-3,1.0E-3);
sigma <- 1/sqrt(tau);
for (n in 1:Nbins) {
prior[n] <- 1/Nbins; # Uniform prior on v
}
k ~ dcat(prior[]);
v <- eta[k]; # degrees of freedom for t
}
/* blocker example with t-distribution for treatment effect */
model in blockert.bug
data in blocker-data.R
compile, nchains(2)
parameters in blockert-init.R
initialize
update 1000
monitor d, thin(10)
monitor delta.new, thin(10)
monitor sigma, thin(10)
update 10000
coda *
model in blockht.bug
data in blockht-data.R
compile, nchains(2)
parameters in blocker-init.R
initialize
update 1000
monitor d
monitor delta.new
monitor sigma
update 50000
coda *
source("../../R/Rcheck.R")
load.module("glm")
data <- read.jagsdata("blocker-data.R")
inits <- read.jagsdata("blocker-init.R")
m <- jags.model("blocker.bug", data, inits, n.chains = 2)
check.data(m, data)
update(m, 3000)
x <- coda.samples(m, c("d","delta.new","sigma"), n.iter=30000, thin=10)
source("bench-test1.R")
check.fun()
model in blocker.bug
data in blocker-data.R
load glm
compile, nchains(2)
parameters in blocker-init.R
initialize
update 3000
monitor d, thin(10)
monitor delta.new, thin(10)
monitor sigma, thin(10)
update 30000
coda *
"benchstats" <-
structure(c(0.328925666593290, 1.361773719, 2.35485909399999,
2.905017817, 5.52899987699997, 6.74962981200002, 6.44900819500003,
8.93038915700002, 8.98999504799997, 11.961565614, 11.567550053,
15.7992240800000, 16.9442522500000, 0.206015615498089, 0.254427219748402,
0.275674901128758, 0.291994557182881, 0.505432028484243, 0.599825513774583,
0.599116612277779, 0.707646133200092, 0.668964478579138, 0.693675845768788,
0.899031063807028, 0.564875897503258, 0.758890337282891, 0.00206015615498089,
0.00254427219748402, 0.00275674901128758, 0.00291994557182881,
0.00505432028484243, 0.00599825513774583, 0.00599116612277779,
0.00707646133200092, 0.00668964478579138, 0.00693675845768788,
0.00899031063807028, 0.00564875897503258, 0.00758890337282891,
0.00258761771180866, 0.00323086392832006, 0.00349439227542114,
0.00362274300823012, 0.0062281082156432, 0.00760436021782597,
0.00760672870198849, 0.0090917253449894, 0.00835866060958007,
0.00865514003580626, 0.0116223001575147, 0.00738127204149733,
0.00975892410983544), .Dim = as.integer(c(13, 4)), .Dimnames = list(
c("theta[1]", "theta[2]", "theta[3]", "theta[4]", "theta[5]",
"theta[6]", "theta[7]", "theta[8]", "theta[9]", "theta[10]",
"theta[11]", "theta[12]", "theta[13]"), c("Mean", "SD", "Naive SE",
"Time-series SE")))
"benchstats" <-
structure(c(0.328925666593290, 1.361773719, 2.35485909399999,
2.905017817, 5.52899987699997, 6.74962981200002, 6.44900819500003,
8.93038915700002, 8.98999504799997, 11.961565614, 11.567550053,
15.7992240800000, 16.9442522500000, 0.206015615498089, 0.254427219748402,
0.275674901128758, 0.291994557182881, 0.505432028484243, 0.599825513774583,
0.599116612277779, 0.707646133200092, 0.668964478579138, 0.693675845768788,
0.899031063807028, 0.564875897503258, 0.758890337282891, 0.00206015615498089,
0.00254427219748402, 0.00275674901128758, 0.00291994557182881,
0.00505432028484243, 0.00599825513774583, 0.00599116612277779,
0.00707646133200092, 0.00668964478579138, 0.00693675845768788,
0.00899031063807028, 0.00564875897503258, 0.00758890337282891,
0.00258761771180866, 0.00323086392832006, 0.00349439227542114,
0.00362274300823012, 0.0062281082156432, 0.00760436021782597,
0.00760672870198849, 0.0090917253449894, 0.00835866060958007,
0.00865514003580626, 0.0116223001575147, 0.00738127204149733,
0.00975892410983544), .Dim = as.integer(c(13, 4)), .Dimnames = list(
c("theta[1]", "theta[2]", "theta[3]", "theta[4]", "theta[5]",
"theta[6]", "theta[7]", "theta[8]", "theta[9]", "theta[10]",
"theta[11]", "theta[12]", "theta[13]"), c("Mean", "SD", "Naive SE",
"Time-series SE")))
"nChild" <-
13
"nInd" <-
34
"nGrade" <-
5
"gamma" <-
structure(c(0.7425, 10.267, 10.5215, 9.3877, 0.2593, -0.5998,
10.5891, 6.6701, 8.8921, 12.4275, 12.4788, 13.7778, 5.8374, 6.9485,
13.7184, 14.3476, 4.8066, 9.1037, 10.7483, 0.3887, 3.2573, 11.6273,
15.8842, 14.8926, 15.5487, 15.4091, 3.9216, 15.475, 0.4927, 1.3059,
1.5012, 0.8021, 5.0022, 4.0168, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1.0153, 7.0421, 14.4242,
17.4685, 16.7409, 16.872, 17.0061, 5.2099, 16.9406, 1.3556, 1.8793,
1.8902, 2.3873, 6.3704, 5.1537, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 17.4944, 2.3016, 2.497, 2.3689, 3.9525, 8.2832, 7.1053,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 3.2535, 3.2306,
2.9495, 5.3198, 10.4988, 10.3038), .Dim = c(34, 4))
"delta" <-
c(2.9541, 0.6603, 0.7965, 1.0495, 5.7874, 3.8376, 0.6324, 0.8272,
0.6968, 0.8747, 0.8136, 0.8246, 0.6711, 0.978, 1.1528, 1.6923,
1.0331, 0.5381, 1.0688, 8.1123, 0.9974, 1.2656, 1.1802, 1.368,
1.5435, 1.5006, 1.6766, 1.4297, 3.385, 3.3085, 3.4007, 2.0906,
1.0954, 1.5329)
"ncat" <-
c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
3, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5)
"grade" <-
structure(c(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, NA,
2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, NA, 2, 2, 1, 1, 1, 1, 1, 2, 1, 2,
2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1,
1, 1, 1, NA, NA, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, NA, NA, 1,
1, NA, 2, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 2, 2, 1, 1, 1,
NA, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2,
2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, NA, 2, 2, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, NA, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
2, 2, 1, 1, 1, 1, 1, NA, NA, 2, 1, NA, 1, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
1, 1, 1, 1, 2, 2, 3, 2, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, NA, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, NA, NA, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 2, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 4, 2, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 3, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 1, 1, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2,
2, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 2, 3, 3, 3, 4,
5, 5, 5, 5, 1, 1, 1, 1, 3, 3, 3, 4, 4, 5, 5, 5, 5), .Dim = c(13,
34))
"theta" <-
c(0.5, 1, 2, 3, 5, 6, 7, 8, 9, 12, 13, 16, 18)
"grade" <-
structure(c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, 1,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, 1, NA, NA, 1,
NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, 1, 1, NA, NA, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), .Dim = as.integer(c(13,
34)))
model {
for (i in 1:nChild) {
theta[i] ~ dnorm(0.0, 0.001);
for (j in 1:nInd) {
# Cumulative probability of > grade k given theta
for (k in 1:(ncat[j]-1)) {
logit(Q[i,j,k]) <- delta[j]*(theta[i] - gamma[j,k]);
}
Q[i,j,ncat[j]] <- 0;
}
for (j in 1:nInd) {
# Probability of observing grade k given theta
p[i,j,1] <- 1 - Q[i,j,1];
for (k in 2:ncat[j]) {
p[i,j,k] <- Q[i,j,(k-1)] - Q[i,j,k];
}
grade[i,j] ~ dcat(p[i,j,1:ncat[j]]);
}
}
}
/* Bones example rewritten using the dordered.logit distribution
We need to rescale the break points gamma -> gamma.star
*/
data {
for (j in 1:nInd) {
nbreak[j] <- ncat[j] - 1
}
}
model {
for (j in 1:nInd) {
gamma.star[j,1:nbreak[j]] <- delta[j]*gamma[j,1:nbreak[j]]
}
for (i in 1:nChild) {
theta[i] ~ dnorm(0.0, 0.001);
}
for (i in 1:nChild) {
for (j in 1:nInd) {
mu[i,j] <- delta[j]*theta[i]
grade[i,j] ~ dordered.logit(mu[i,j], gamma.star[j,1:nbreak[j]])
}
}
}
source("../../R/Rcheck.R")
data <- read.jagsdata("bones-data.R")
data$nGrade <- NULL #not used in this model
m <- jags.model("bones.bug", data, n.chains=2)
check.data(m, data)
update(m, 1000)
x <- coda.samples(m, c("theta"), n.iter=10000)
source("bench-test1.R")
check.fun()
model in bones.bug
data in bones-data.R
compile, nchains(2)
parameters in bones-init.R
initialize
update 1000
monitor theta
update 10000
coda theta
source("../../R/Rcheck.R")
data <- read.jagsdata("bones-data.R")
data$nGrade <- NULL #not used in this model
load.module("glm")
m <- jags.model("bones2.bug", data, n.chains=2)
check.data(m, data, skip="nbreak")
update(m, 1000)
x <- coda.samples(m, c("theta"), n.iter=10000)
source("bench-test1.R")
check.fun()
model in bones2.bug
data in bones-data.R
load glm
compile, nchains(2)
parameters in bones-init.R
initialize
samplers to foo-samplers.txt
update 1000
monitor theta
update 10000
coda theta
"benchstats" <-
structure(c(1527.56852, 3042.44997500001, 2167.10592327016, 0.334602909528699,
21.3952772745395, 1066.22807536413, 3108.27390518326, 0.241399354005255,
0.478412943833268, 23.8415845603296, 69.503117446785, 0.00539785365280285,
0.518994073152718, 27.2796486124696, 71.8877553203144, 0.0065916991537614
), .Dim = as.integer(c(4, 4)), .Dimnames = list(c("theta", "sigma2.within",
"sigma2.between", "f.between"), c("Mean", "SD", "Naive SE", "Time-series SE"
)))
"y" <-
structure(c(1545, 1540, 1595, 1445, 1595, 1520, 1440, 1555, 1550,
1440, 1630, 1455, 1440, 1490, 1605, 1595, 1515, 1450, 1520, 1560,
1510, 1465, 1635, 1480, 1580, 1495, 1560, 1545, 1625, 1445), .Dim = c(6,
5))
"BATCHES" <-
6
"SAMPLES" <-
5
"theta" <- 1500
"tau.within" <- 1
"tau.between" <- 1
model {
for (i in 1:BATCHES) {
for (j in 1:SAMPLES) {
y[i,j] ~ dnorm(mu[i], tau.within);
}
mu[i] ~ dnorm(theta, tau.between);
}
theta ~ dnorm(0.0, 1.0E-10);
tau.within ~ dgamma(0.001, 0.001); sigma2.within <- 1/tau.within;
tau.between ~ dgamma(0.001, 0.001); sigma2.between <- 1/tau.between;
sigma2.total <- sigma2.within + sigma2.between;
f.within <- sigma2.within/sigma2.total;
f.between <- sigma2.between/sigma2.total;
}
source("../../R/Rcheck.R")
data <- read.jagsdata("dyes-data.R")
m <- jags.model("dyes.bug", data, n.chains=2)
check.data(m, data)
update(m, 5000)
x <- coda.samples(m, c("theta","sigma2.within","sigma2.between","f.between"),
n.iter=100000, thin=50)
source("bench-test1.R")
check.fun()
model in dyes.bug
data in dyes-data.R
compile, nchains(2)
parameters in dyes-init.R
initialize
update 5000
monitor theta, thin(50)
monitor sigma2.within, thin(50)
monitor sigma2.between, thin(50)
monitor f.between, thin(50)
update 100000
coda *
benchstats <-
structure(c(-1.3364843048965, 0.8857548235, -0.948221539745,
0.3409922147826, 0.483523797157, -0.166734238845, 0.542756667,
1.27772876176106, 0.137639803798128, 0.427066653443961, 0.219644174354971,
0.377201177302070, 0.0548238742472561, 0.0660601784258409, 0.0285708836810436,
0.00307771957702348, 0.00954950068024042, 0.0049113930471953,
0.0084344747364038, 0.00122589909606765, 0.00147715049565945,
0.0306373037254135, 0.00301569343090085, 0.0119995305179986,
0.00559785083353585, 0.00902764024983775, 0.00188271237213285,
0.00214224408183078), .Dim = c(7L, 4L), .Dimnames = list(c("alpha0",
"alpha.Base", "alpha.Trt", "alpha.BT", "alpha.Age", "alpha.V4",
"sigma.b1"), c("Mean", "SD", "Naive SE", "Time-series SE")))
benchstats <-
structure(c(-1.37189672981, 0.8736528925, -0.9605229726, 0.355106918969,
0.479617611166, -0.1013694932506, 0.499196704, 0.3629548375,
1.24596963892823, 0.139269373665539, 0.42677791506752, 0.217604463793377,
0.363695771040492, 0.0875368695485932, 0.0715217646809272, 0.044228441996263,
0.0278607281054439, 0.00311415786699964, 0.00954304429386607,
0.00486578373249383, 0.0081324846717574, 0.00195738390848186,
0.00159927527697297, 0.000988978028425506, 0.0290481340303291,
0.00321706133553644, 0.00999669190698694, 0.00526391236992741,
0.00855921171217479, 0.00239996464569182, 0.00192372054467975,
0.00145567919667494), .Dim = c(8L, 4L), .Dimnames = list(c("alpha0",
"alpha.Base", "alpha.Trt", "alpha.BT", "alpha.Age", "alpha.V4",
"sigma.b1", "sigma.b"), c("Mean", "SD", "Naive SE", "Time-series SE"
)))
"N" <- 59
"T" <- 4
"y" <-
structure(c(5, 3, 2, 4, 7, 5, 6, 40, 5, 14, 26, 12, 4, 7, 16,
11, 0, 37, 3, 3, 3, 3, 2, 8, 18, 2, 3, 13, 11, 8, 0, 3, 2, 4,
22, 5, 2, 3, 4, 2, 0, 5, 11, 10, 19, 1, 6, 2, 102, 4, 8, 1, 18,
6, 3, 1, 2, 0, 1, 3, 5, 4, 4, 18, 2, 4, 20, 6, 13, 12, 6, 4,
9, 24, 0, 0, 29, 5, 0, 4, 4, 3, 12, 24, 1, 1, 15, 14, 7, 4, 6,
6, 3, 17, 4, 4, 7, 18, 1, 2, 4, 14, 5, 7, 1, 10, 1, 65, 3, 6,
3, 11, 3, 5, 23, 3, 0, 4, 3, 3, 0, 1, 9, 8, 0, 21, 6, 6, 6, 8,
6, 12, 10, 0, 3, 28, 2, 6, 3, 3, 3, 2, 76, 2, 4, 13, 9, 9, 3,
1, 7, 1, 19, 7, 0, 7, 2, 1, 4, 0, 25, 3, 6, 2, 8, 0, 72, 2, 5,
1, 28, 4, 4, 19, 0, 0, 3, 3, 3, 5, 4, 21, 7, 2, 12, 5, 0, 22,
4, 2, 14, 9, 5, 3, 29, 5, 7, 4, 4, 5, 8, 25, 1, 2, 12, 8, 4,
0, 3, 4, 3, 16, 4, 4, 7, 5, 0, 0, 3, 15, 8, 7, 3, 8, 0, 63, 4,
7, 5, 13, 0, 3, 8, 1, 0, 2), .Dim = c(59, 4))
"Trt" <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
"Base" <-
c(11, 11, 6, 8, 66, 27, 12, 52, 23, 10, 52, 33, 18, 42, 87, 50,
18, 111, 18, 20, 12, 9, 17, 28, 55, 9, 10, 47, 76, 38, 19, 10,
19, 24, 31, 14, 11, 67, 41, 7, 22, 13, 46, 36, 38, 7, 36, 11,
151, 22, 41, 32, 56, 24, 16, 22, 25, 13, 12)
"Age" <-
c(31, 30, 25, 36, 22, 29, 31, 42, 37, 28, 36, 24, 23, 36, 26,
26, 28, 31, 32, 21, 29, 21, 32, 25, 30, 40, 19, 22, 18, 32, 20,
30, 18, 24, 30, 35, 27, 20, 22, 28, 23, 40, 33, 21, 35, 25, 26,
25, 22, 32, 25, 35, 21, 41, 32, 26, 21, 36, 37)
"Visit" <-
c(-3, -1, 1, 3)
"V4" <-
c(0, 0, 0, 1)
"alpha0" <-
1
"alpha.Base" <-
0
"alpha.Trt" <-
0
"alpha.BT" <-
0
"alpha.Age" <-
0
"alpha.V4" <-
0
"tau.b1" <-
1
"tau.b" <-
1
/*
Model II from section 6.2 of Breslow NE and Clayton DG "Approximate
inference in generalized linear mixed models", JASA, Vol 88, pp 9-25 (1993)
analyzing the epilepsy data of Thall and Vail (1990).
*/
model {
for(j in 1:N) {
for(k in 1:T) {
log(mu[j,k]) <- alpha0 + alpha.Base * log.Base4[j]
+ alpha.Trt * Trt[j]
+ alpha.BT * Trt[j] * log.Base4[j]
+ alpha.Age * log.Age[j]
+ alpha.V4 * V4[k]
+ b1[j];
y[j,k] ~ dpois(mu[j,k]);
}
b1[j] ~ dnorm(0.0,tau.b1); # subject random effects
log.Base4[j] <- log(Base[j]/4);
log.Age[j] <- log(Age[j]);
}
# priors:
alpha0 ~ dnorm(0.0,1.0E-4);
alpha.Base ~ dnorm(0.0,1.0E-4);
alpha.Trt ~ dnorm(0.0,1.0E-4);
alpha.BT ~ dnorm(0.0,1.0E-4);
alpha.Age ~ dnorm(0.0,1.0E-4);
alpha.V4 ~ dnorm(0.0,1.0E-4);
tau.b1 ~ dgamma(1.0E-3,1.0E-3);
sigma.b1 <- 1.0/sqrt(tau.b1);
}
/*
Model III from section 6.2 of Breslow NE and Clayton DG "Approximate
inference in generalized linear mixed models", JASA, Vol 88, pp 9-25 (1993)
analyzing the epilepsy data of Thall and Vail (1990).
This model has unit level random effects as well as subject-level
random effects
*/
model {
for(j in 1:N) {
for(k in 1:T) {
log(mu[j,k]) <- alpha0 + alpha.Base * log.Base4[j]
+ alpha.Trt * Trt[j]
+ alpha.BT * log.Base4[j] * Trt[j]
+ alpha.Age * log.Age[j]
+ alpha.V4 * V4[k]
+ b1[j] + b[j,k];
y[j,k] ~ dpois(mu[j,k]);
b[j,k] ~ dnorm(0.0, tau.b);
}
b1[j] ~ dnorm(0.0,tau.b1); # subject random effects
log.Base4[j] <- log(Base[j]/4);
log.Age[j] <- log(Age[j]);
}
# priors:
alpha0 ~ dnorm(0.0,1.0E-4);
alpha.Base ~ dnorm(0.0,1.0E-4);
alpha.Trt ~ dnorm(0.0,1.0E-4);
alpha.BT ~ dnorm(0.0,1.0E-4);
alpha.Age ~ dnorm(0.0,1.0E-4);
alpha.V4 ~ dnorm(0.0,1.0E-4);
tau.b1 ~ dgamma(1.0E-3,1.0E-3);
sigma.b1 <- 1.0/sqrt(tau.b1);
tau.b ~ dgamma(1.0E-3,1.0E-3);
sigma.b <- 1.0/sqrt(tau.b);
}
source("../../R/Rcheck.R")
load.module("glm")
d <- read.jagsdata("epil-data.R")
m <- jags.model("epil2.bug", data=d, n.chains=2)
check.data(m, d)
update(m, 1000)
x <- coda.samples(m, c("alpha0", "alpha.Base", "alpha.Trt", "alpha.BT",
"alpha.Age", "alpha.V4", "sigma.b1"), n.iter=5000,
thin=5)
source("bench-test1.R")
check.fun()
model in "epil2.bug"
data in epil-data.R
load glm
compile, nchains(2)
inits in epil-inits.R
initialize
update 1000
monitor alpha0, thin(5)
monitor alpha.Base, thin(5)
monitor alpha.Trt, thin(5)
monitor alpha.BT, thin(5)
monitor alpha.Age, thin(5)
monitor alpha.V4, thin(5)
monitor sigma.b1, thin(5)
update 5000
coda *
source("../../R/Rcheck.R")
load.module("glm")
d <- read.jagsdata("epil-data.R")
m <- jags.model("epil3.bug", data=d, n.chains=2)
check.data(m, d)
update(m, 1000)
x <- coda.samples(m, c("alpha0", "alpha.Base", "alpha.Trt", "alpha.BT",
"alpha.Age", "alpha.V4", "sigma.b1", "sigma.b"),
n.iter=10000, thin=10)
source("bench-test2.R")
check.fun()
model in epil3.bug
data in epil-data.R
load glm
compile, nchains(2)
inits in epil-inits.R
initialize
update 1000
monitor alpha0, thin(10)
monitor alpha.Base, thin(10)
monitor alpha.Trt, thin(10)
monitor alpha.BT, thin(10)
monitor alpha.Age, thin(10)
monitor alpha.V4, thin(10)
monitor sigma.b1, thin(10)
monitor sigma.b, thin(10)
update 10000
coda *
It is not currently possible to do the transformed version of the EQUIV
model in JAGS, since it uses a symmetric order constraint, namely
tau[1] ~ dgamma(0.001, 0.001) I(tau[3],);
tau[3] ~ dgamma(0.001, 0.001) I(,tau[1]);
This construction is not allowed in JAGS because it creates a directed
cycle in the model.
"benchstats" <-
structure(c(0.9940944383, 0.9985, 0.107993561989999, 0.140971934020000,
0.0503853244807424, 0.038702710369934, 0.0318072686978664, 0.0522499033983419,
0.000503853244807424, 0.00038702710369934, 0.000318072686978664,
0.000522499033983419, 0.000496560273546467, 0.000386045689209867,
0.000546941378452105, 0.000806677796773217), .Dim = as.integer(c(4,
4)), .Dimnames = list(c("theta", "equivalence", "sigma[1]", "sigma[2]"
), c("Mean", "SD", "Naive SE", "Time-series SE")))
"benchstats" <-
structure(c(0.9792509085, 0.985, 0.137716297340000, 0.123616081710000,
1.37818835210000, 1.56343460819999, 1.4338874202, 0.0726088470261219,
0.121558535890182, 0.0430866436963967, 0.0593885108107869, 0.191581993694522,
0.186234532854242, 0.203312014471999, 0.000726088470261219, 0.00121558535890182,
0.000430866436963967, 0.000593885108107869, 0.00191581993694522,
0.00186234532854242, 0.00203312014471999, 0.000823965642919049,
0.00131637436275614, 0.000720241017507236, 0.00100258690820864,
0.00216617370586762, 0.00205346443299852, 0.00258485093374419
), .Dim = as.integer(c(7, 4)), .Dimnames = list(c("theta", "equivalence",
"sigma[1]", "sigma[2]", "Y[1,1]", "Y[3,2]", "Y[6,2]"), c("Mean",
"SD", "Naive SE", "Time-series SE")))
"Y" <-
structure(c(1.4, 1.64, 1.44, 1.36, 1.65, 1.08, 1.09, 1.25, 1.25,
1.3, 1.65, 1.57, 1.58, 1.68, 1.69, 1.31, 1.43, 1.44, 1.39, 1.52
), .Dim = c(10, 2))
"group" <-
c(1, 1, -1, -1, -1, 1, 1, 1, -1, -1)
"N" <- 10
"T" <- 2
"mu" <- 0
"phi" <- 0
"pi" <- 0
"tau" <- c(1,1)
model {
# Original model
for (i in 1:N) {
d[i] ~ dnorm(0,tau[2]); # Subject random effect
for (k in 1:2){
Treat[i,k] <- group[i]*(k-1.5) + 1.5; # treatment given
Y[i,k] ~ dnorm(m[i,k], tau[1]);
m[i,k] <- mu + pow(-1, Treat[i,k]-1)* phi /2
+ pow(-1, k-1)* pi /2 + d[i];
}
}
tau[1] ~ dgamma(0.001, 0.001);
tau[2] ~ dgamma(0.001, 0.001);
sigma[1] <- sqrt(1/tau[1]);
sigma[2] <- sqrt(1/tau[2]);
pi ~ dnorm(0, 1.0E-06);
phi ~ dnorm(0, 1.0E-06);
mu ~ dnorm(0, 1.0E-06);
theta <- exp(phi);
# Indicate whether 0.8 < theta < 1.2
equivalence <- step(theta - 0.8) - step(theta - 1.2);
}
"Y" <-
structure(c(NA, 1.64, 1.44, 1.36, 1.65, 1.08, 1.09, 1.25, 1.25,
1.3, 1.65, 1.57, NA, 1.68, 1.69, NA, 1.43, 1.44, 1.39, 1.52), .Dim = c(10,
2))
"group" <-
c(1, 1, -1, -1, -1, 1, 1, 1, -1, -1)
"N" <- 10
"T" <- 2
source("../../R/Rcheck.R")
d <- read.jagsdata("equiv-data.R")
d$T <- NULL #Variable T not used in this model
m <- jags.model("equiv.bug", d, n.chains=2)
check.data(m, d)
update(m, 1000)
x <- coda.samples(m, c("theta","equivalence","sigma"), n.iter=10000)
source("bench-test1.R")
check.fun()
model in equiv.bug
data in equiv-data.R
compile, nchains(2)
inits in equiv-init.R
initialize
update 1000
monitor theta
monitor equivalence
monitor sigma
update 10000
coda *
model in "equiv.bug"
data in "equivmiss-data.R"
compile, nchains(2)
inits in "equiv-init.R"
initialize
update 1000
monitor theta
monitor equivalence
monitor sigma
monitor Y[1,1]
monitor Y[3,2]
monitor Y[6,2]
update 10000
coda *
benchstats <-
structure(c(0.7046534675, 3.920348015, 5.25480163, 1.0638459225,
0.2471005675665, 0.166132473742, -0.23814697059455, 1.2090262215,
0.139216565001681, 0.33341926533294, 0.477453932494927, 0.320612187910867,
0.240939821670762, 0.225193279144865, 0.192649274299991, 0.25064438130318,
0.00311297702937778, 0.00745548142292493, 0.0106761944918325,
0.00716910646583635, 0.005387578197425, 0.00503547480244004,
0.00430776873150783, 0.00560457874772288, 0.0031130612634594,
0.00745230497584645, 0.0108330211397763, 0.00716545258483729,
0.0054805174540052, 0.00578128603634509, 0.00419117549720276,
0.00628197722158713), .Dim = c(8L, 4L), .Dimnames = list(c("a[1]",
"a[2]", "a[3]", "beta", "kappa", "log.sigma", "pi", "sigma"),
c("Mean", "SD", "Naive SE", "Time-series SE")))
benchstats <-
structure(c(0.7046534675, 3.920348015, 5.25480163, 1.0638459225,
0.2471005675665, 0.166132473742, -0.23814697059455, 1.2090262215,
0.139216565001681, 0.33341926533294, 0.477453932494927, 0.320612187910867,
0.240939821670762, 0.225193279144865, 0.192649274299991, 0.25064438130318,
0.00311297702937778, 0.00745548142292493, 0.0106761944918325,
0.00716910646583635, 0.005387578197425, 0.00503547480244004,
0.00430776873150783, 0.00560457874772288, 0.0031130612634594,
0.00745230497584645, 0.0108330211397763, 0.00716545258483729,
0.0054805174540052, 0.00578128603634509, 0.00419117549720276,
0.00628197722158713), .Dim = c(8L, 4L), .Dimnames = list(c("a[1]",
"a[2]", "a[3]", "beta", "kappa", "log.sigma", "pi", "sigma"),
c("Mean", "SD", "Naive SE", "Time-series SE")))
benchstats <-
structure(c(0.721357114, 3.965218875, 5.3324775, 1.0791242478,
0.2400732192965, 0.217095560541, -0.24324180743205, 1.269013238,
0.145006586494057, 0.343032594458916, 0.49639016449619, 0.32643938466003,
0.253088589288329, 0.210362758583484, 0.203148269152192, 0.252580920385133,
0.00324244584585914, 0.00767044199708253, 0.0110996215117578,
0.0072994065463303, 0.00565923289978228, 0.00470385428127047,
0.00454253339335724, 0.00564788107800619, 0.00377961311214598,
0.0105740721976009, 0.013276537859806, 0.00788446070128391, 0.00595868534770646,
0.00821025498628612, 0.00453693690225141, 0.00964642904943395
), .Dim = c(8L, 4L), .Dimnames = list(c("a[1]", "a[2]", "a[3]",
"beta", "kappa", "log.sigma", "pi", "sigma"), c("Mean", "SD",
"Naive SE", "Time-series SE")))
"N" <-
286
"T" <-
2
"G" <-
2
"Npattern" <-
16
"Ncut" <-
3
"pattern" <-
structure(c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 1,
2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4), .Dim = as.integer(c(16,
2)))
"Ncum" <-
structure(c(59, 157, 173, 175, 186, 253, 270, 271, 271, 278,
280, 281, 282, 285, 285, 286, 122, 170, 173, 175, 226, 268, 270,
271, 278, 280, 281, 281, 284, 285, 286, 286), .Dim = as.integer(c(16,
2)))
"treat" <-
structure(c(1, -1, -1, 1), .Dim = as.integer(c(2, 2)))
"period" <-
structure(c(1, 1, -1, -1), .Dim = as.integer(c(2, 2)))
"carry" <-
structure(c(0, 0, -1, 1), .Dim = as.integer(c(2, 2)))
"beta" <-
0
"pi" <-
0
"kappa" <-
0
"a" <-
c(2, 3, 4)
"beta" <-
0
"pi" <-
0
"kappa" <-
0
"a" <-
c(2, 3, 4)
"beta" <-
0
"pi" <-
0
"kappa" <-
0
"a0" <-
c(2, 3, 4)
"tau" <-
1
/* This is the original form of the inhaler example, in which the ordered
logistic distribution is constructed directly in BUGS code. It is superseded
by the dordered.logit distribution in the glm module from JAGS 3.4.0.
*/
var
pattern[Npattern,T], # response pattern e.g. (1,1) or (2,4) etc.
Ncum[Npattern,T], # cumulative total
response[N,T], # response for patient i in period t
p[N,T,(Ncut+1)], # prob of response j for patient i in period t
Q[N,T,Ncut], # cumulative prob of response worse than j
# for patient i in period t
group[N], # treatment group (1=AB; 2=BA)
mu[G,T], # logistic mean for group g & period t
treat[2,T], beta, # treatment effect
period[2,T], pi, # period effect
carry[2,T], kappa, # carryover effect
a[Ncut], # cut points for latent response variable
b[N], # subject random effect
tau, # precision of subject effects
sigma, log.sigma;
data {
# Construct individual response data from contingency table
for (i in 1:Ncum[1,1]) {
group[i] <- 1; for (t in 1:T) { response[i,t] <- pattern[1,t] }
}
for (i in (Ncum[1,1]+1):Ncum[1,2]) {
group[i] <- 2; for (t in 1:T) { response[i,t] <- pattern[1,t] }
}
for (k in 2:Npattern) {
for(i in (Ncum[k-1,2]+1):Ncum[k,1]) {
group[i] <- 1; for (t in 1:T) { response[i,t] <- pattern[k,t] }
}
for(i in (Ncum[k,1]+1):Ncum[k,2]) {
group[i] <- 2; for (t in 1:T) { response[i,t] <- pattern[k,t] }
}
}
}
model {
for (i in 1:N) {
for (t in 1:T) {
for (j in 1:Ncut) {
#
# Cumulative probability of worse response than j
#
logit(Q[i,t,j]) <- -(a[j] + mu[group[i],t] + b[i]);
}
#
# Probability of response = j
#
p[i,t,1] <- 1 - Q[i,t,1];
for (j in 2:Ncut) { p[i,t,j] <- Q[i,t,j-1] - Q[i,t,j] }
p[i,t,(Ncut+1)] <- Q[i,t,Ncut];
response[i,t] ~ dcat(p[i,t,]);
}
#
# Subject (random) effects
#
b[i] ~ dnorm(0.0, tau);
}
#
# Fixed effects
#
for (g in 1:G) {
for(t in 1:T) {
# logistic mean for group g in period t
mu[g,t] <- beta*treat[g,t]/2 + pi*period[g,t]/2 + kappa*carry[g,t];
}
}
beta ~ dnorm(0, 1.0E-06);
pi ~ dnorm(0, 1.0E-06);
kappa ~ dnorm(0, 1.0E-06);
# ordered cut points for underlying continuous latent variable
a <- sort(a0)
for(i in 1:3) {
a0[i] ~ dnorm(0, 1.0E-6);
}
tau ~ dgamma(0.001, 0.001);
sigma <- sqrt(1/tau);
log.sigma <- log(sigma);
}
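
For reference, the cumulative-logit arithmetic in the model block above can be reproduced outside JAGS. This is an illustrative R sketch (the cut points come from the init files above, and the linear predictor value is just an example number) showing that the category probabilities handed to dcat form a valid probability vector:

a   <- c(2, 3, 4)                # cut points, as in the init files above
eta <- 0.5                       # illustrative value of mu[group[i],t] + b[i]
Q   <- plogis(-(a + eta))        # logit(Q[j]) = -(a[j] + eta): cumulative prob of a worse response
p   <- c(1 - Q[1], Q[1] - Q[2], Q[2] - Q[3], Q[3])   # same differencing as the model block
sum(p)                           # equals 1, so p is a valid probability vector for dcat
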
var
pattern[Npattern,T], # response pattern e.g. (1,1) or (2,4) etc.
Ncum[Npattern,T], # cumulative total
response[N,T], # response for patient i in period t
p[N,T,(Ncut+1)], # prob of response j for patient i in period t
Q[N,T,Ncut], # cumulative prob of response worse than j
# for patient i in period t
group[N], # treatment group (1=AB; 2=BA)
mu[G,T], # logistic mean for group g & period t
treat[2,T], beta, # treatment effect
period[2,T], pi, # period effect
carry[2,T], kappa, # carryover effect
a[Ncut], # cut points for latent response variable
b[N], # subject random effect
tau, # precision of subject effects
sigma, log.sigma;
data {
# Construct individual response data from contingency table
for (i in 1:Ncum[1,1]) {
group[i] <- 1; for (t in 1:T) { response[i,t] <- pattern[1,t] }
}
for (i in (Ncum[1,1]+1):Ncum[1,2]) {
group[i] <- 2; for (t in 1:T) { response[i,t] <- pattern[1,t] }
}
for (k in 2:Npattern) {
for(i in (Ncum[k-1,2]+1):Ncum[k,1]) {
group[i] <- 1; for (t in 1:T) { response[i,t] <- pattern[k,t] }
}
for(i in (Ncum[k,1]+1):Ncum[k,2]) {
group[i] <- 2; for (t in 1:T) { response[i,t] <- pattern[k,t] }
}
}
}
model {
for (i in 1:N) {
for (t in 1:T) {
response[i,t] ~ dordered.logit(-mu[group[i],t] + b[i], a[1:Ncut])
}
# Random effects
b[i] ~ dnorm(0.0, tau);
}
#
# Fixed effects
#
for (g in 1:G) {
for(t in 1:T) {
# logistic mean for group g in period t
mu[g,t] <- beta*treat[g,t]/2 + pi*period[g,t]/2 + kappa*carry[g,t];
}
}
beta ~ dnorm(0, 1.0E-06);
pi ~ dnorm(0, 1.0E-06);
kappa ~ dnorm(0, 1.0E-06);
# ordered cut points for underlying continuous latent variable
for(i in 1:3) {
a[i] ~ dnorm(0, 1.0E-6);
}
tau ~ dgamma(1.0E-3, 1.0E-3);
sigma <- sqrt(1/tau);
log.sigma <- log(sigma);
}
var
pattern[Npattern,T], # response pattern e.g. (1,1) or (2,4) etc.
Ncum[Npattern,T], # cumulative total
response[N,T], # response for patient i in period t
p[N,T,(Ncut+1)], # prob of response j for patient i in period t
Q[N,T,Ncut], # cumulative prob of response worse than j
# for patient i in period t
group[N], # treatment group (1=AB; 2=BA)
mu[G,T], # logistic mean for group g & period t
treat[2,T], beta, # treatment effect
period[2,T], pi, # period effect
carry[2,T], kappa, # carryover effect
a[Ncut], # cut points for latent response variable
b[N], # subject random effect
tau, # precision of subject effects
sigma, log.sigma;
data {
# Construct individual response data from contingency table
for (i in 1:Ncum[1,1]) {
group[i] <- 1; for (t in 1:T) { response[i,t] <- pattern[1,t] }
}
for (i in (Ncum[1,1]+1):Ncum[1,2]) {
group[i] <- 2; for (t in 1:T) { response[i,t] <- pattern[1,t] }
}
for (k in 2:Npattern) {
for(i in (Ncum[k-1,2]+1):Ncum[k,1]) {
group[i] <- 1; for (t in 1:T) { response[i,t] <- pattern[k,t] }
}
for(i in (Ncum[k,1]+1):Ncum[k,2]) {
group[i] <- 2; for (t in 1:T) { response[i,t] <- pattern[k,t] }
}
}
}
model {
for (i in 1:N) {
for (t in 1:T) {
response[i,t] ~ dordered.logit(-mu[group[i],t] + b[i], a[1:Ncut])
}
# Random effects
b[i] ~ dnorm(0.0, tau);
}
#
# Fixed effects
#
for (g in 1:G) {
for(t in 1:T) {
# logistic mean for group g in period t
mu[g,t] <- beta*treat[g,t]/2 + pi*period[g,t]/2 + kappa*carry[g,t];
}
}
beta ~ dnorm(0, 1.0E-06);
pi ~ dnorm(0, 1.0E-06);
kappa ~ dnorm(0, 1.0E-06);
# ordered cut points for underlying continuous latent variable
for(i in 1:3) {
a[i] ~ dnorm(0, 1.0E-6);
}
tau ~ dscaled.gamma(10, 2);
sigma <- sqrt(1/tau);
log.sigma <- log(sigma);
}
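
A rough, purely illustrative way to see what the dscaled.gamma(10, 2) prior above implies for sigma, assuming (as documented for the JAGS glm module) that dscaled.gamma(S, df) corresponds to a half-t prior on the standard deviation with scale S and df degrees of freedom:

# Under the assumption above, these are prior draws of sigma = sqrt(1/tau)
set.seed(1)
sigma <- abs(10 * rt(1e5, df = 2))   # half-t(2) scaled by 10
quantile(sigma, c(0.5, 0.9, 0.99))   # implied prior quantiles for sigma
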
source("../../R/Rcheck.R")
d <- read.jagsdata("inhaler-data.R")
inits <- read.jagsdata("inhaler0-inits.R")
m <- jags.model("inhaler.bug", d, inits, n.chains=2)
## Check data consistency, skipping variables created in data block
check.data(m, d, skip=c("group", "response"))
update(m, 1000)
x <- coda.samples(m, c("a","beta","kappa","log.sigma","pi","sigma"),
n.iter=10000, thin=10)
source("bench-test1.R")
check.fun()
/*
The original formulation of the inhaler example shows poor mixing and
requires a long run with a long thinning interval to obtain good
estimates of the posterior distribution.
*/
model in inhaler0.bug
data in inhaler-data.R
compile, nchains(2)
parameters in inhaler0-inits.R
initialize
update 10000
monitor a, thin(100)
monitor beta, thin(100)
monitor kappa, thin(100)
monitor log.sigma, thin(100)
monitor pi, thin(100)
monitor sigma, thin(100)
update 100000
coda *
exit
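
Given the poor mixing noted in the comment at the top of this script, one way to quantify it is to load the CODA output written by the coda * command and inspect effective sample sizes and autocorrelations. A sketch using the coda package (file names are the JAGS defaults):

library(coda)
x <- read.coda("CODAchain1.txt", "CODAindex.txt")
effectiveSize(x)     # effective sample sizes; small values indicate slow mixing
autocorr.diag(x)     # lag autocorrelations for the monitored parameters
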
source("../../R/Rcheck.R")
d <- read.jagsdata("inhaler-data.R")
inits <- read.jagsdata("inhaler-inits.R")
load.module("glm")
m <- jags.model("inhaler1.bug", d, inits, n.chains=2)
names(list.samplers(m))
update(m, 1000)
x <- coda.samples(m, c("a","beta","kappa","log.sigma","pi","sigma"),
n.iter=20000, thin=10)
source("bench-test1.R")
check.fun()
model in inhaler1.bug
data in inhaler-data.R
load glm
compile, nchains(2)
parameters in inhaler-inits.R
initialize
update 1000
monitor a, thin(10)
monitor beta, thin(10)
monitor kappa, thin(10)
monitor log.sigma, thin(10)
monitor pi, thin(10)
monitor sigma, thin(10)
update 10000
coda *
exit