diff --git a/docs/aterm.md b/docs/aterm.md
index 02eed501f8c0702040ef06bd046f9e38b8d11af2..cb9d035062decec19443ad5fb719ad1c2fb39d89 100644
--- a/docs/aterm.md
+++ b/docs/aterm.md
@@ -1,54 +1,54 @@
-# aterm
-
-**aterm** (or **aterm.cmd** for Windows) script provides a command line Mediaflux terminal. Via **aterm** you can execute any Mediaflux service or built-in commands. (e.g. **download**, **import**, **source**)
-It requires file: **aterm.jar**, which can be downloaded by this script automatically. It also requires the configruation file: **mflux.cfg**, which includes the Mediaflux server details and user credentials.
-
-## 1. Configuration
-
-### aterm.jar
-* This script downloads **aterm.jar** automatically to $HOME/.Arcitecta directory, if $MFLUX_ATERM environment variable is not set or aterm.jar is not found.
-* You can set environment variable to specify a different location before execute this script:
-  * on Linux/Mac OS/Unix, modify aterm file:
-    * **export MFLUX_ATERM=/path/to/aterm.jar**
-  * on Windows, modify aterm.cmd file:
-    * **SET MFLUX_ATERM=x:\path\to\aterm.jar**
-
-### mflux.cfg
-* You **must** specify the Mediaflux server details and user credentails (or secure identity token) in a **mflux.cfg** file.
-* the script will try the following locations to find **mflux.cfg**:
-  1. use the file specified by **$MFLUX_CFG** (or **%MFLUX_CFG%** on Windows) if the file exists;
-  2. use the file in **$HOME/.Arcitecta/mflux.cfg** (or **%USERPROFILE%/Arcitecta/mflux.cfg**) if the file exists
-  3. use the file **mflux.cfg** in the script directory if exists.
-
-## 2. Examples
-
-### 2.1. Enter command line terminal, simply 
-* On Mac OS, Linux or Unix:
-  * **./aterm**
-* On Windows:
-  * **aterm.cmd**
-* Type **quit** to quit the terminal.
-### 2.2. Execute a Mediaflux service:
-* On Mac OS, Linux or Unix:
-  * **./aterm server.uuid**
-* On Windows:
-  * **aterm.cmd server.uuid**
-### 2.3. Execute download command:
-* On Mac OS, Linux or Unix:
-  * **./aterm download -namespace /projects/my-project /Users/wilson/Downloads**
-* On Windows:
-  * **aterm.cmd download -namespace /projects/my-project c:\users\wilson\Downloads**
-### 2.4. Execute import command:
-* On Mac OS, Linux or Unix:
-  * **./aterm import -namespace /projects/my-project /Users/wilson/dir-to-upload**
-* On Windows:
-  * **aterm.cmd import -namespace /projects/my-project c:\users\wilson\dir-to-upload**
-
-## 3. Other scripts
-
-* **NOTE:** 
-  * **aterm-download** is equivalent to 
-    * `aterm download`
-  * **aterm-import** is equivalent to 
-    * `aterm import`
-
+# aterm
+
+**aterm** (or **aterm.cmd** for Windows) script provides a command line Mediaflux terminal. Via **aterm** you can execute any Mediaflux service or built-in commands. (e.g. **download**, **import**, **source**)
+It requires file: **aterm.jar**, which can be downloaded by this script automatically. It also requires the configuration file: **mflux.cfg**, which includes the Mediaflux server details and user credentials.
+
+## 1. Configuration
+
+### aterm.jar
+* This script downloads **aterm.jar** automatically to $HOME/.Arcitecta directory, if $MFLUX_ATERM environment variable is not set or aterm.jar is not found.
+* You can set environment variable to specify a different location before executing this script:
+  * on Linux/Mac OS/Unix, modify aterm file:
+    * **export MFLUX_ATERM=/path/to/aterm.jar**
+  * on Windows, modify aterm.cmd file:
+    * **SET MFLUX_ATERM=x:\path\to\aterm.jar**
+
+### mflux.cfg
+* You **must** specify the Mediaflux server details and user credentials (or secure identity token) in a **mflux.cfg** file.
+* the script will try the following locations to find **mflux.cfg**:
+  1. use the file specified by **$MFLUX_CFG** (or **%MFLUX_CFG%** on Windows) if the file exists;
+  2. use the file in **$HOME/.Arcitecta/mflux.cfg** (or **%USERPROFILE%/Arcitecta/mflux.cfg**) if the file exists
+  3. use the file **mflux.cfg** in the script directory if exists.
+
+## 2. Examples
+
+### 2.1. Enter command line terminal, simply 
+* On Mac OS, Linux or Unix:
+  * **./aterm**
+* On Windows:
+  * **aterm.cmd**
+* Type **quit** to quit the terminal.
+### 2.2. Execute a Mediaflux service:
+* On Mac OS, Linux or Unix:
+  * **./aterm server.uuid**
+* On Windows:
+  * **aterm.cmd server.uuid**
+### 2.3. Execute download command:
+* On Mac OS, Linux or Unix:
+  * **./aterm download -namespace /projects/my-project /Users/wilson/Downloads**
+* On Windows:
+  * **aterm.cmd download -namespace /projects/my-project c:\users\wilson\Downloads**
+### 2.4. Execute import command:
+* On Mac OS, Linux or Unix:
+  * **./aterm import -namespace /projects/my-project /Users/wilson/dir-to-upload**
+* On Windows:
+  * **aterm.cmd import -namespace /projects/my-project c:\users\wilson\dir-to-upload**
+
+## 3. Other scripts
+
+* **NOTE:** 
+  * **aterm-download** is equivalent to 
+    * `aterm download`
+  * **aterm-import** is equivalent to 
+    * `aterm import`
+
diff --git a/docs/scp-get.md b/docs/scp-get.md
index 41e6d7c90f08cb7ac0b821264e8418479353ed86..d2e17d90fb7eb68e1147256e0ffc91df0d8f4a0f 100644
--- a/docs/scp-get.md
+++ b/docs/scp-get.md
@@ -1,32 +1,32 @@
-```
-USAGE:
-    scp-get <mediaflux-arguments> <scp-arguments>
-
-DESCRIPTION:
-    Import files from remote SSH server to Mediaflux using scp.
-
-MEDIAFLUX ARGUMENTS:
-    --mf.host <host>                      Mediaflux server host.
-    --mf.port <port>                      Mediaflux server port.
-    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
-    --mf.auth <domain,user,password>      Mediaflux user credentials.
-    --mf.token <token>                    Mediaflux secure identity token.
-    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
-    --mf.namespace <dst-namespace>        Destination namespace on Mediaflux.
-    --mf.readonly                         Set the assets to be read-only.
-    --mf.worm                             Set the assets to WORM state.
-    --mf.worm.expiry <d-MMM-yyyy>         Set the assets WORM expiry date.
-
-SCP ARGUMENTS:
-    --ssh.host <host>                     SSH server host.
-    --ssh.port <port>                     SSH server port. Optional. Defaults to 22.
-    --ssh.user <username>                 SSH user name.
-    --ssh.password <password>             SSH user's password.
-    --ssh.private-key <private-key>       SSH user's private key.
-    --ssh.passphrase <passphrase>         Passphrase for the SSH user's private key.
-    --ssh.path <src-path>                 Source path on remote SSH server.
-
-EXAMPLES:
-    The command below imports files from scp server into the specified Mediaflux asset namespace:
-         scp-get --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport 443 --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/dst-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.path path/to/src-directory
-```
+```
+USAGE:
+    scp-get <mediaflux-arguments> <scp-arguments>
+
+DESCRIPTION:
+    Import files from remote SSH server to Mediaflux using scp.
+
+MEDIAFLUX ARGUMENTS:
+    --mf.host <host>                      Mediaflux server host.
+    --mf.port <port>                      Mediaflux server port.
+    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
+    --mf.auth <domain,user,password>      Mediaflux user credentials.
+    --mf.token <token>                    Mediaflux secure identity token.
+    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
+    --mf.namespace <dst-namespace>        Destination namespace on Mediaflux.
+    --mf.readonly                         Set the assets to be read-only.
+    --mf.worm                             Set the assets to WORM state.
+    --mf.worm.expiry <d-MMM-yyyy>         Set the assets WORM expiry date.
+
+SCP ARGUMENTS:
+    --ssh.host <host>                     SSH server host.
+    --ssh.port <port>                     SSH server port. Optional. Defaults to 22.
+    --ssh.user <username>                 SSH user name.
+    --ssh.password <password>             SSH user's password.
+    --ssh.private-key <private-key>       SSH user's private key.
+    --ssh.passphrase <passphrase>         Passphrase for the SSH user's private key.
+    --ssh.path <src-path>                 Source path on remote SSH server.
+
+EXAMPLES:
+    The command below imports files from scp server into the specified Mediaflux asset namespace:
+         scp-get --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport https --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/dst-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.path path/to/src-directory
+```
diff --git a/docs/scp-put.md b/docs/scp-put.md
index 4889a06fb3942cfe15332419822c38009360c6b8..f4176a3e0f81fb5b48fa5610a0408299e12260c2 100644
--- a/docs/scp-put.md
+++ b/docs/scp-put.md
@@ -1,30 +1,30 @@
-```
-USAGE:
-    scp-put <mediaflux-arguments> <scp-arguments>
-
-DESCRIPTION:
-    Export Mediaflux assets to remote SSH server using scp.
-
-MEDIAFLUX ARGUMENTS:
-    --mf.host <host>                      Mediaflux server host.
-    --mf.port <port>                      Mediaflux server port.
-    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
-    --mf.auth <domain,user,password>      Mediaflux user credentials.
-    --mf.token <token>                    Mediaflux secure identity token.
-    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
-    --mf.namespace <src-namespace>        Source namespace on Mediaflux.
-    --mf.unarchive                        Unpack asset contents.
-
-SCP ARGUMENTS:
-    --ssh.host <host>                     SSH server host.
-    --ssh.port <port>                     SSH server port. Optional. Defaults to 22.
-    --ssh.user <username>                 SSH user name.
-    --ssh.password <password>             SSH user's password.
-    --ssh.private-key <private-key>       SSH user's private key.
-    --ssh.passphrase <passphrase>         Passphrase for the SSH user's private key.
-    --ssh.directory <dst-directory>       Destination directory on remote SSH server.
-
-EXAMPLES:
-    The command below exports assets from the specified Mediaflux asset namespace to remote scp server:
-        scp-put --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport 443 --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/src-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.directory path/to/dst-directory
- ```
+```
+USAGE:
+    scp-put <mediaflux-arguments> <scp-arguments>
+
+DESCRIPTION:
+    Export Mediaflux assets to remote SSH server using scp.
+
+MEDIAFLUX ARGUMENTS:
+    --mf.host <host>                      Mediaflux server host.
+    --mf.port <port>                      Mediaflux server port.
+    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
+    --mf.auth <domain,user,password>      Mediaflux user credentials.
+    --mf.token <token>                    Mediaflux secure identity token.
+    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
+    --mf.namespace <src-namespace>        Source namespace on Mediaflux.
+    --mf.unarchive                        Unpack asset contents.
+
+SCP ARGUMENTS:
+    --ssh.host <host>                     SSH server host.
+    --ssh.port <port>                     SSH server port. Optional. Defaults to 22.
+    --ssh.user <username>                 SSH user name.
+    --ssh.password <password>             SSH user's password.
+    --ssh.private-key <private-key>       SSH user's private key.
+    --ssh.passphrase <passphrase>         Passphrase for the SSH user's private key.
+    --ssh.directory <dst-directory>       Destination directory on remote SSH server.
+
+EXAMPLES:
+    The command below exports assets from the specified Mediaflux asset namespace to remote scp server:
+        scp-put --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport https --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/src-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.directory path/to/dst-directory
+```
diff --git a/docs/sftp-get.md b/docs/sftp-get.md
index b9726100974b97f361cc1e250e5c18f4db2b8c87..706f8bf2d0aa65751aee8798da41dc186a74c93d 100644
--- a/docs/sftp-get.md
+++ b/docs/sftp-get.md
@@ -1,32 +1,32 @@
-```
-USAGE:
-    sftp-get <mediaflux-arguments> <sftp-arguments>
-
-DESCRIPTION:
-    Import files from remote SFTP server to Mediaflux using sftp.
-
-MEDIAFLUX ARGUMENTS:
-    --mf.host <host>                      Mediaflux server host.
-    --mf.port <port>                      Mediaflux server port.
-    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
-    --mf.auth <domain,user,password>      Mediaflux user credentials.
-    --mf.token <token>                    Mediaflux secure identity token.
-    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
-    --mf.namespace <dst-namespace>        Destination namespace on Mediaflux.
-    --mf.readonly                         Set the assets to be read-only.
-    --mf.worm                             Set the assets to WORM state.
-    --mf.worm.expiry <d-MMM-yyyy>         Set the assets WORM expiry date.
-
-SFTP ARGUMENTS:
-    --ssh.host <host>                     SFTP server host.
-    --ssh.port <port>                     SFTP server port. Optional. Defaults to 22.
-    --ssh.user <username>                 SFTP user name.
-    --ssh.password <password>             SFTP user's password.
-    --ssh.private-key <private-key>       SFTP user's private key.
-    --ssh.passphrase <passphrase>         Passphrase for the SFTP user's private key.
-    --ssh.path <src-path>                 Source path on remote SFTP server.
-
-EXAMPLES:
-    The command below imports files from sftp server into the specified Mediaflux asset namespace:
-         sftp-get --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport 443 --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/dst-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.path path/to/src-directory
-```
+```
+USAGE:
+    sftp-get <mediaflux-arguments> <sftp-arguments>
+
+DESCRIPTION:
+    Import files from remote SFTP server to Mediaflux using sftp.
+
+MEDIAFLUX ARGUMENTS:
+    --mf.host <host>                      Mediaflux server host.
+    --mf.port <port>                      Mediaflux server port.
+    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
+    --mf.auth <domain,user,password>      Mediaflux user credentials.
+    --mf.token <token>                    Mediaflux secure identity token.
+    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
+    --mf.namespace <dst-namespace>        Destination namespace on Mediaflux.
+    --mf.readonly                         Set the assets to be read-only.
+    --mf.worm                             Set the assets to WORM state.
+    --mf.worm.expiry <d-MMM-yyyy>         Set the assets WORM expiry date.
+
+SFTP ARGUMENTS:
+    --ssh.host <host>                     SFTP server host.
+    --ssh.port <port>                     SFTP server port. Optional. Defaults to 22.
+    --ssh.user <username>                 SFTP user name.
+    --ssh.password <password>             SFTP user's password.
+    --ssh.private-key <private-key>       SFTP user's private key.
+    --ssh.passphrase <passphrase>         Passphrase for the SFTP user's private key.
+    --ssh.path <src-path>                 Source path on remote SFTP server.
+
+EXAMPLES:
+    The command below imports files from sftp server into the specified Mediaflux asset namespace:
+         sftp-get --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport https --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/dst-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.path path/to/src-directory
+```
diff --git a/docs/sftp-put.md b/docs/sftp-put.md
index f46f5ac22fbee61353951bae1eba8445401f9c5d..daf02c56428df15c5d6e53cc1709a7b8f67f75e2 100644
--- a/docs/sftp-put.md
+++ b/docs/sftp-put.md
@@ -1,30 +1,30 @@
-```
-USAGE:
-    sftp-put <mediaflux-arguments> <sftp-arguments>
-
-DESCRIPTION:
-    Export Mediaflux assets to remote SFTP server using sftp.
-
-MEDIAFLUX ARGUMENTS:
-    --mf.host <host>                      Mediaflux server host.
-    --mf.port <port>                      Mediaflux server port.
-    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
-    --mf.auth <domain,user,password>      Mediaflux user credentials.
-    --mf.token <token>                    Mediaflux secure identity token.
-    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
-    --mf.namespace <src-namespace>        Source namespace on Mediaflux.
-    --mf.unarchive                        Unpack asset contents.
-
-SFTP ARGUMENTS:
-    --ssh.host <host>                     SFTP server host.
-    --ssh.port <port>                     SFTP server port. Optional. Defaults to 22.
-    --ssh.user <username>                 SFTP user name.
-    --ssh.password <password>             SFTP user's password.
-    --ssh.private-key <private-key>       SFTP user's private key.
-    --ssh.passphrase <passphrase>         Passphrase for the SFTP user's private key.
-    --ssh.directory <dst-directory>       Destination directory on remote SFTP server.
-
-EXAMPLES:
-    The command below exports assets from the specified Mediaflux asset namespace to remote sftp server:
-        sftp-put --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport 443 --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/src-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.directory path/to/dst-directory
-```
+```
+USAGE:
+    sftp-put <mediaflux-arguments> <sftp-arguments>
+
+DESCRIPTION:
+    Export Mediaflux assets to remote SFTP server using sftp.
+
+MEDIAFLUX ARGUMENTS:
+    --mf.host <host>                      Mediaflux server host.
+    --mf.port <port>                      Mediaflux server port.
+    --mf.transport <https|http|tcp/ip>    Mediaflux server transport, can be http, https or tcp/ip.
+    --mf.auth <domain,user,password>      Mediaflux user credentials.
+    --mf.token <token>                    Mediaflux secure identity token.
+    --mf.async                            Executes the job in the background. The background service can be checked by executing service.background.describe service in Mediaflux Aterm.
+    --mf.namespace <src-namespace>        Source namespace on Mediaflux.
+    --mf.unarchive                        Unpack asset contents.
+
+SFTP ARGUMENTS:
+    --ssh.host <host>                     SFTP server host.
+    --ssh.port <port>                     SFTP server port. Optional. Defaults to 22.
+    --ssh.user <username>                 SFTP user name.
+    --ssh.password <password>             SFTP user's password.
+    --ssh.private-key <private-key>       SFTP user's private key.
+    --ssh.passphrase <passphrase>         Passphrase for the SFTP user's private key.
+    --ssh.directory <dst-directory>       Destination directory on remote SFTP server.
+
+EXAMPLES:
+    The command below exports assets from the specified Mediaflux asset namespace to remote sftp server:
+        sftp-put --mf.host mediaflux.your-domain.org --mf.port 443 --mf.transport https --mf.auth mf_domain,mf_user,MF_PASSWD --mf.namespace /path/to/src-namespace --ssh.host ssh-server.your-domain.org --ssh.port 22 --ssh.user ssh_username --ssh.password SSH_PASSWD --ssh.directory path/to/dst-directory
+```
diff --git a/src/main/config/samples/mf-download-config.xml b/src/main/config/samples/mf-download-config.xml
index 32b65d4befbbd566ce1e51198f04828534d462bb..cfe24ccde9761affe085567e4c170fabebbd081b 100644
--- a/src/main/config/samples/mf-download-config.xml
+++ b/src/main/config/samples/mf-download-config.xml
@@ -1,81 +1,81 @@
-<?xml version="1.0"?>
-<properties>
-	<server>
-		<!-- Mediaflux server host -->
-		<host>mediaflux.researchsoftware.unimelb.edu.au</host>
-		<!-- Mediaflux server port -->
-		<port>443</port>
-		<!-- Mediaflux server transport. https, http or tcp/ip -->
-		<transport>https</transport>
-		<session>
-			<!-- Retry times on Mediaflux connection failure -->
-			<connectRetryTimes>1</connectRetryTimes>
-			<!-- Time interval (in milliseconds) between retries -->
-			<connectRetryInterval>1000</connectRetryInterval>
-			<!-- Retry times on service execution error -->
-			<executeRetryTimes>1</executeRetryTimes>
-			<!-- Time interval (in milliseconds) between retries -->
-			<executeRetryInterval>1000</executeRetryInterval>
-		</session>
-	</server>
-	<credential>
-		<!-- Application name. Can be used as application key for secure identity 
-			token -->
-		<app>unimelb-mf-download</app>
-		<!-- Mediaflux user's authentication domain -->
-		<domain>DOMAIN</domain>
-		<!-- Mediaflux username -->
-		<user>USERNAME</user>
-		<!-- Mediaflux user's password -->
-		<password>PASSWD</password>
-		<!-- Mediaflux secure identity token -->
-		<token>XXXYYYZZZ</token>
-	</credential>
-	<sync>
-		<settings>
-			<!-- Number of query threads to compare local files with remote assets -->
-			<numberOfQueriers>2</numberOfQueriers>
-			<!-- Number of workers/threads to transfer (upload/download) data concurrently -->
-			<numberOfWorkers>4</numberOfWorkers>
-			<!-- Batch size for checking files with remote assets. Set to 1 will check 
-				files one by one, which will slow down significantly when there are large 
-				number of small files. -->
-			<batchSize>1000</batchSize>
-			<!-- Compare CRC32 checksum after uploading -->
-			<csumCheck>true</csumCheck>
-            <!-- Overwrite if destination file exists (when downloading)-->
-            <overwrite>false</overwrite>
-            <!-- Extract archive content (when downloading). 
-                Set to none to disable it;
-                Set to aar to extract only aar files; 
-                Set to all to extract all supported archive files (zip/tar/aar).-->
-            <unarchive>none</unarchive>
-			<!-- Running a daemon in background to scan for local file system changes 
-				periodically. -->
-			<daemon enabled="true">
-                <!-- Listener port. -->
-				<listenerPort>9761</listenerPort>
-				<!-- Time interval (in milliseconds) between scans -->
-				<scanInterval>60000</scanInterval>
-			</daemon>
-			<!-- Log directory location -->
-			<logDirectory>.</logDirectory>
-			<!-- Exclude empty directories for uploading -->
-			<excludeEmptyFolder>true</excludeEmptyFolder>
-            <!-- Verbose -->
-            <verbose>false</verbose>
-			<!-- Send notifications when jobs complete (Non-daemon mode only) -->
-			<notification>
-				<!-- The email recipients. Can be multiple. -->
-				<email>admin@your-domain.org</email>
-			</notification>
-		</settings>
-		<!-- download jobs -->
-		<job action="download">
-			<!-- dst directory -->
-			<directory parent="true">/path/to/dst-parent-directory</directory>
-            <!-- src asset namespace -->
-            <namespace>/path/to/src-ns</namspace>
-		</job>
-	</sync>
-</properties>
+<?xml version="1.0"?>
+<properties>
+	<server>
+		<!-- Mediaflux server host -->
+		<host>mediaflux.researchsoftware.unimelb.edu.au</host>
+		<!-- Mediaflux server port -->
+		<port>443</port>
+		<!-- Mediaflux server transport. https, http or tcp/ip -->
+		<transport>https</transport>
+		<session>
+			<!-- Retry times on Mediaflux connection failure -->
+			<connectRetryTimes>1</connectRetryTimes>
+			<!-- Time interval (in milliseconds) between retries -->
+			<connectRetryInterval>1000</connectRetryInterval>
+			<!-- Retry times on service execution error -->
+			<executeRetryTimes>1</executeRetryTimes>
+			<!-- Time interval (in milliseconds) between retries -->
+			<executeRetryInterval>1000</executeRetryInterval>
+		</session>
+	</server>
+	<credential>
+		<!-- Application name. Can be used as application key for secure identity 
+			token -->
+		<app>unimelb-mf-download</app>
+		<!-- Mediaflux user's authentication domain -->
+		<domain>DOMAIN</domain>
+		<!-- Mediaflux username -->
+		<user>USERNAME</user>
+		<!-- Mediaflux user's password -->
+		<password>PASSWD</password>
+		<!-- Mediaflux secure identity token -->
+		<token>XXXYYYZZZ</token>
+	</credential>
+	<sync>
+		<settings>
+			<!-- Number of query threads to compare local files with remote assets -->
+			<numberOfQueriers>2</numberOfQueriers>
+			<!-- Number of workers/threads to transfer (upload/download) data concurrently -->
+			<numberOfWorkers>4</numberOfWorkers>
+			<!-- Batch size for checking files with remote assets. Set to 1 will check 
+				files one by one, which will slow down significantly when there are large 
+				number of small files. -->
+			<batchSize>1000</batchSize>
+			<!-- Compare CRC32 checksum after uploading -->
+			<csumCheck>true</csumCheck>
+            <!-- Overwrite if destination file exists (when downloading)-->
+            <overwrite>false</overwrite>
+            <!-- Extract archive content (when downloading). 
+                Set to none to disable it;
+                Set to aar to extract only aar files; 
+                Set to all to extract all supported archive files (zip/tar/aar).-->
+            <unarchive>none</unarchive>
+			<!-- Running a daemon in background to scan for local file system changes 
+				periodically. -->
+			<daemon enabled="true">
+                <!-- Listener port. -->
+				<listenerPort>9761</listenerPort>
+				<!-- Time interval (in milliseconds) between scans -->
+				<scanInterval>60000</scanInterval>
+			</daemon>
+			<!-- Log directory location -->
+			<logDirectory>.</logDirectory>
+			<!-- Exclude empty directories for uploading -->
+			<excludeEmptyFolder>true</excludeEmptyFolder>
+            <!-- Verbose -->
+            <verbose>false</verbose>
+			<!-- Send notifications when jobs complete (Non-daemon mode only) -->
+			<notification>
+				<!-- The email recipients. Can be multiple. -->
+				<email>admin@your-domain.org</email>
+			</notification>
+		</settings>
+		<!-- download jobs -->
+		<job action="download">
+			<!-- dst directory -->
+			<directory parent="true">/path/to/dst-parent-directory</directory>
+            <!-- src asset namespace -->
+            <namespace>/path/to/src-ns</namespace>
+		</job>
+	</sync>
+</properties>
diff --git a/src/main/config/samples/mf-perf-config.xml b/src/main/config/samples/mf-perf-config.xml
index 51c8c1aa06d1bd2613ef1143ce4dd8e2b345d4d5..31e8f61f5e1654a41b493ad56233718c8870f465 100644
--- a/src/main/config/samples/mf-perf-config.xml
+++ b/src/main/config/samples/mf-perf-config.xml
@@ -1,37 +1,37 @@
-<?xml version="1.0"?>
-<properties>
-	<server>
-		<host>mediaflux.yourdomain.org</host>
-		<port>443</port>
-		<transport>https</transport>
-	</server>
-	<credential>
-		<domain>DOMAIN</domain>
-		<user>USER</user>
-		<password>PASSWORD</password>
-	</credential>
-	<perf>
-		<verbose>true</verbose>
-		<logDirectory>/tmp</logDirectory>
-		<test>
-			<action>ping</action>
-			<action>upload</action>
-			<action>download</action>
-			<numberOfThreads>1</numberOfThreads>
-			<useClusterIO>true</useClusterIO>
-			<useInMemoryFile>false</useInMemoryFile>
-			<numberOfFiles>10</numberOfFiles>
-			<fileSize>10000000</fileSize>
-			<namespace>/mf-perf-test</namespace>
-			<directory>/tmp/mf-perf-test</directory>
-		</test>
-		<result>
-			<asset>
-				<path>/test-result/mf-perf-result.csv</path>
-			</asset>
-			<file>
-				<path>/tmp/mf-perf-result.csv</path>
-			</file>
-		</result>
-	</perf>
-</properties>
+<?xml version="1.0"?>
+<properties>
+	<server>
+		<host>mediaflux.yourdomain.org</host>
+		<port>443</port>
+		<transport>https</transport>
+	</server>
+	<credential>
+		<domain>DOMAIN</domain>
+		<user>USER</user>
+		<password>PASSWORD</password>
+	</credential>
+	<perf>
+		<verbose>true</verbose>
+		<logDirectory>/tmp</logDirectory>
+		<test>
+			<action>ping</action>
+			<action>upload</action>
+			<action>download</action>
+			<numberOfThreads>1</numberOfThreads>
+			<useClusterIO>true</useClusterIO>
+			<useInMemoryFile>false</useInMemoryFile>
+			<numberOfFiles>10</numberOfFiles>
+			<fileSize>10000000</fileSize>
+			<namespace>/mf-perf-test</namespace>
+			<directory>/tmp/mf-perf-test</directory>
+		</test>
+		<result>
+			<asset>
+				<path>/test-result/mf-perf-result.csv</path>
+			</asset>
+			<file>
+				<path>/tmp/mf-perf-result.csv</path>
+			</file>
+		</result>
+	</perf>
+</properties>
diff --git a/src/main/config/samples/mf-upload-config.xml b/src/main/config/samples/mf-upload-config.xml
index bf843b6b36820375f73b781a099593b05607f8d8..5bb9b61364e311c03dd37c352f5fbc3cc361cf00 100644
--- a/src/main/config/samples/mf-upload-config.xml
+++ b/src/main/config/samples/mf-upload-config.xml
@@ -1,101 +1,101 @@
-<?xml version="1.0"?>
-<properties>
-	<server>
-		<!-- Mediaflux server host -->
-		<host>mediaflux.researchsoftware.unimelb.edu.au</host>
-		<!-- Mediaflux server port -->
-		<port>443</port>
-		<!-- Mediaflux server transport. https, http or tcp/ip -->
-		<transport>https</transport>
-		<session>
-			<!-- Retry times on Mediaflux connection failure -->
-			<connectRetryTimes>1</connectRetryTimes>
-			<!-- Time interval (in milliseconds) between retries -->
-			<connectRetryInterval>1000</connectRetryInterval>
-			<!-- Retry times on service execution error -->
-			<executeRetryTimes>1</executeRetryTimes>
-			<!-- Time interval (in milliseconds) between retries -->
-			<executeRetryInterval>1000</executeRetryInterval>
-		</session>
-	</server>
-	<credential>
-		<!-- Application name. Can be used as application key for secure identity 
-			token -->
-		<app>unimelb-mf-upload</app>
-		<!-- Mediaflux user's authentication domain -->
-		<domain>DOMAIN</domain>
-		<!-- Mediaflux username -->
-		<user>USERNAME</user>
-		<!-- Mediaflux user's password -->
-		<password>PASSWD</password>
-		<!-- Mediaflux secure identity token -->
-		<token>XXXYYYZZZ</token>
-	</credential>
-	<sync>
-		<settings>
-			<!-- Number of query threads to compare local files with remote assets -->
-			<numberOfQueriers>2</numberOfQueriers>
-			<!-- Number of workers/threads to transfer (upload/download) data concurrently -->
-			<numberOfWorkers>4</numberOfWorkers>
-			<!-- Batch size for checking files with remote assets. Set to 1 will check 
-				files one by one, which will slow down significantly when there are large 
-				number of small files. -->
-			<batchSize>1000</batchSize>
-			<!-- Compare CRC32 checksum after uploading -->
-			<csumCheck>true</csumCheck>
-			<!-- Overwrite if destination file exists (when downloading) -->
-			<overwrite>false</overwrite>
-			<!-- Extract archive content (when downloading). Set to none to disable 
-				it; Set to aar to extract only aar files; Set to all to extract all supported 
-				archive files (zip/tar/aar). -->
-			<unarchive>none</unarchive>
-			<!-- Running a daemon in background to scan for local file system changes 
-				periodically. -->
-			<daemon enabled="true">
-				<!-- Listener port. -->
-				<listenerPort>9761</listenerPort>
-				<!-- Time interval (in milliseconds) between scans -->
-				<scanInterval>60000</scanInterval>
-			</daemon>
-			<!-- Worm -->
-			<worm>
-			    <enable>true</enable>
-				<canAddVersions>false</canAddVersions>
-				<canMove>true</canMove>
-			</worm>
-			<!-- Log directory location -->
-			<logDirectory>.</logDirectory>
-			<!-- Exclude empty directories for uploading -->
-			<excludeEmptyFolder>true</excludeEmptyFolder>
-			<!-- Verbose -->
-			<verbose>false</verbose>
-			<!-- Send notifications when jobs complete (Non-daemon mode only) -->
-			<notification>
-				<!-- The email recipients. Can be multiple. -->
-				<email>admin@your-domain.org</email>
-			</notification>
-		</settings>
-		<!-- Upload jobs -->
-		<job action="upload">
-			<!-- source directory -->
-			<directory>/path/to/src-directory1</directory>
-			<namespace parent="true">/path/to/dst-parent-ns</namespace>
-			<!-- Exclude empty directories for uploading -->
-			<excludeEmptyFolder>true</excludeEmptyFolder>
-			<!-- The filter below excludes all .class files. -->
-			<exclude>**/*.class</exclude>
-		</job>
-		<job action="upload">
-			<!-- source directory -->
-			<directory>/path/to/src-directory2</directory>
-			<!-- destination asset namespace -->
-			<namespace parent="false">/path/to/dst-namespace2</namespace>
-			<!-- inclusive filter. The filter below select all sub-directories' name 
-				start with wilson under the source directory, and all their descendants. -->
-			<include>wilson*/**</include>
-			<!-- exclusive filter. The filter below excludes all files with name: 
-				.DS_store -->
-			<exclude>**/.DS_Store</exclude>
-		</job>
-	</sync>
-</properties>
+<?xml version="1.0"?>
+<properties>
+	<server>
+		<!-- Mediaflux server host -->
+		<host>mediaflux.researchsoftware.unimelb.edu.au</host>
+		<!-- Mediaflux server port -->
+		<port>443</port>
+		<!-- Mediaflux server transport. https, http or tcp/ip -->
+		<transport>https</transport>
+		<session>
+			<!-- Retry times on Mediaflux connection failure -->
+			<connectRetryTimes>1</connectRetryTimes>
+			<!-- Time interval (in milliseconds) between retries -->
+			<connectRetryInterval>1000</connectRetryInterval>
+			<!-- Retry times on service execution error -->
+			<executeRetryTimes>1</executeRetryTimes>
+			<!-- Time interval (in milliseconds) between retries -->
+			<executeRetryInterval>1000</executeRetryInterval>
+		</session>
+	</server>
+	<credential>
+		<!-- Application name. Can be used as application key for secure identity 
+			token -->
+		<app>unimelb-mf-upload</app>
+		<!-- Mediaflux user's authentication domain -->
+		<domain>DOMAIN</domain>
+		<!-- Mediaflux username -->
+		<user>USERNAME</user>
+		<!-- Mediaflux user's password -->
+		<password>PASSWD</password>
+		<!-- Mediaflux secure identity token -->
+		<token>XXXYYYZZZ</token>
+	</credential>
+	<sync>
+		<settings>
+			<!-- Number of query threads to compare local files with remote assets -->
+			<numberOfQueriers>2</numberOfQueriers>
+			<!-- Number of workers/threads to transfer (upload/download) data concurrently -->
+			<numberOfWorkers>4</numberOfWorkers>
+			<!-- Batch size for checking files with remote assets. Set to 1 will check 
+				files one by one, which will slow down significantly when there are large 
+				number of small files. -->
+			<batchSize>1000</batchSize>
+			<!-- Compare CRC32 checksum after uploading -->
+			<csumCheck>true</csumCheck>
+			<!-- Overwrite if destination file exists (when downloading) -->
+			<overwrite>false</overwrite>
+			<!-- Extract archive content (when downloading). Set to none to disable 
+				it; Set to aar to extract only aar files; Set to all to extract all supported 
+				archive files (zip/tar/aar). -->
+			<unarchive>none</unarchive>
+			<!-- Running a daemon in background to scan for local file system changes 
+				periodically. -->
+			<daemon enabled="true">
+				<!-- Listener port. -->
+				<listenerPort>9761</listenerPort>
+				<!-- Time interval (in milliseconds) between scans -->
+				<scanInterval>60000</scanInterval>
+			</daemon>
+			<!-- Worm -->
+			<worm>
+			    <enable>true</enable>
+				<canAddVersions>false</canAddVersions>
+				<canMove>true</canMove>
+			</worm>
+			<!-- Log directory location -->
+			<logDirectory>.</logDirectory>
+			<!-- Exclude empty directories for uploading -->
+			<excludeEmptyFolder>true</excludeEmptyFolder>
+			<!-- Verbose -->
+			<verbose>false</verbose>
+			<!-- Send notifications when jobs complete (Non-daemon mode only) -->
+			<notification>
+				<!-- The email recipients. Can be multiple. -->
+				<email>admin@your-domain.org</email>
+			</notification>
+		</settings>
+		<!-- Upload jobs -->
+		<job action="upload">
+			<!-- source directory -->
+			<directory>/path/to/src-directory1</directory>
+			<namespace parent="true">/path/to/dst-parent-ns</namespace>
+			<!-- Exclude empty directories for uploading -->
+			<excludeEmptyFolder>true</excludeEmptyFolder>
+			<!-- The filter below excludes all .class files. -->
+			<exclude>**/*.class</exclude>
+		</job>
+		<job action="upload">
+			<!-- source directory -->
+			<directory>/path/to/src-directory2</directory>
+			<!-- destination asset namespace -->
+			<namespace parent="false">/path/to/dst-namespace2</namespace>
+			<!-- inclusive filter. The filter below select all sub-directories' name 
+				start with wilson under the source directory, and all their descendants. -->
+			<include>wilson*/**</include>
+			<!-- exclusive filter. The filter below excludes all files with name: 
+				.DS_store -->
+			<exclude>**/.DS_Store</exclude>
+		</job>
+	</sync>
+</properties>
diff --git a/src/main/java/unimelb/mf/client/sync/check/AssetItem.java b/src/main/java/unimelb/mf/client/sync/check/AssetItem.java
index 446cb41f77397f1caf885bad8a22870c69c3875b..0c7b662729d7881cae44d69853bb092b064b0d37 100644
--- a/src/main/java/unimelb/mf/client/sync/check/AssetItem.java
+++ b/src/main/java/unimelb/mf/client/sync/check/AssetItem.java
@@ -1,108 +1,108 @@
-package unimelb.mf.client.sync.check;
-
-import arc.xml.XmlDoc;
-import unimelb.mf.model.asset.SymlinkAsset;
-import unimelb.utils.ChecksumUtils.ChecksumType;
-import unimelb.utils.PathUtils;
-
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-public class AssetItem implements Item {
-
-    private Map<ChecksumType, String> _checksums;
-    private long _length;
-
-    private String _assetId;
-    private String _assetPath;
-    private String _assetNamespace;
-    private String _symlinkTarget;
-
-    private String _baseANS;
-
-    public AssetItem(String assetPath, String baseAssetNamespace, long length, String checksum,
-                     ChecksumType checksumType, String symlinkTarget) {
-        if (checksum != null && checksumType != null) {
-            _checksums = new LinkedHashMap<ChecksumType, String>();
-            _checksums.put(checksumType, checksum);
-        }
-        _length = length;
-        _assetId = null;
-        _assetPath = assetPath;
-        _assetNamespace = PathUtils.getParentPath(_assetPath);
-        _baseANS = baseAssetNamespace;
-        _symlinkTarget = symlinkTarget;
-    }
-
-    public AssetItem(XmlDoc.Element ae, String baseAssetNamespace) throws Throwable {
-        _checksums = new LinkedHashMap<ChecksumType, String>();
-        _assetId = ae.value("@id");
-        _assetPath = ae.value("path");
-        _assetNamespace = ae.value("namespace");
-        boolean isSymlinkAsset = SymlinkAsset.isSymlinkAsset(ae);
-        if (!ae.elementExists("content") && !isSymlinkAsset) {
-            throw new Exception("No content is found on asset " + _assetId);
-        }
-        _length = ae.longValue("content/size", -1);
-        String crc32 = ae.value("content/csum[@base='16']");
-        if (crc32 != null) {
-            _checksums.put(ChecksumType.CRC32, crc32);
-        }
-        _baseANS = baseAssetNamespace;
-        _symlinkTarget = SymlinkAsset.getSymlinkTarget(ae);
-    }
-
-    protected void setBaseNamespace(String baseAssetNamespace) {
-        _baseANS = baseAssetNamespace;
-    }
-
-    @Override
-    public final long length() {
-        return _length;
-    }
-
-    @Override
-    public final Map<ChecksumType, String> checksums() {
-        if (_checksums != null) {
-            return Collections.unmodifiableMap(_checksums);
-        }
-        return null;
-    }
-
-    public final String assetNamespace() {
-        return _assetNamespace;
-    }
-
-    public final String assetId() {
-        return _assetId;
-    }
-
-    public final String assetPath() {
-        return _assetPath;
-    }
-
-    @Override
-    public final String fullPath() {
-        return assetPath();
-    }
-
-    @Override
-    public final String basePath() {
-        return _baseANS;
-    }
-
-    public final String baseNamespace() {
-        return _baseANS;
-    }
-
-    @Override
-    public final Type type() {
-        return Type.ASSET;
-    }
-
-    public String symlinkTarget() {
-        return _symlinkTarget;
-    }
-
-}
+package unimelb.mf.client.sync.check;
+
+import arc.xml.XmlDoc;
+import unimelb.mf.model.asset.SymlinkAsset;
+import unimelb.utils.ChecksumUtils.ChecksumType;
+import unimelb.utils.PathUtils;
+
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class AssetItem implements Item {
+
+    private Map<ChecksumType, String> _checksums;
+    private long _length;
+
+    private String _assetId;
+    private String _assetPath;
+    private String _assetNamespace;
+    private String _symlinkTarget;
+
+    private String _baseANS;
+
+    public AssetItem(String assetPath, String baseAssetNamespace, long length, String checksum,
+                     ChecksumType checksumType, String symlinkTarget) {
+        if (checksum != null && checksumType != null) {
+            _checksums = new LinkedHashMap<ChecksumType, String>();
+            _checksums.put(checksumType, checksum);
+        }
+        _length = length;
+        _assetId = null;
+        _assetPath = assetPath;
+        _assetNamespace = PathUtils.getParentPath(_assetPath);
+        _baseANS = baseAssetNamespace;
+        _symlinkTarget = symlinkTarget;
+    }
+
+    public AssetItem(XmlDoc.Element ae, String baseAssetNamespace) throws Throwable {
+        _checksums = new LinkedHashMap<ChecksumType, String>();
+        _assetId = ae.value("@id");
+        _assetPath = ae.value("path");
+        _assetNamespace = ae.value("namespace");
+        boolean isSymlinkAsset = SymlinkAsset.isSymlinkAsset(ae);
+        if (!ae.elementExists("content") && !isSymlinkAsset) {
+            throw new Exception("No content is found on asset " + _assetId);
+        }
+        _length = ae.longValue("content/size", -1);
+        String crc32 = ae.value("content/csum[@base='16']");
+        if (crc32 != null) {
+            _checksums.put(ChecksumType.CRC32, crc32);
+        }
+        _baseANS = baseAssetNamespace;
+        _symlinkTarget = SymlinkAsset.getSymlinkTarget(ae);
+    }
+
+    protected void setBaseNamespace(String baseAssetNamespace) {
+        _baseANS = baseAssetNamespace;
+    }
+
+    @Override
+    public final long length() {
+        return _length;
+    }
+
+    @Override
+    public final Map<ChecksumType, String> checksums() {
+        if (_checksums != null) {
+            return Collections.unmodifiableMap(_checksums);
+        }
+        return null;
+    }
+
+    public final String assetNamespace() {
+        return _assetNamespace;
+    }
+
+    public final String assetId() {
+        return _assetId;
+    }
+
+    public final String assetPath() {
+        return _assetPath;
+    }
+
+    @Override
+    public final String fullPath() {
+        return assetPath();
+    }
+
+    @Override
+    public final String basePath() {
+        return _baseANS;
+    }
+
+    public final String baseNamespace() {
+        return _baseANS;
+    }
+
+    @Override
+    public final Type type() {
+        return Type.ASSET;
+    }
+
+    public String symlinkTarget() {
+        return _symlinkTarget;
+    }
+
+}
diff --git a/src/main/java/unimelb/mf/client/sync/check/FileItem.java b/src/main/java/unimelb/mf/client/sync/check/FileItem.java
index 556d364c0d49ab75f39f78ccd8a52159e785c3b8..96fe99e4ef33723dcb6d18a36686e1dbe4d7bcc7 100644
--- a/src/main/java/unimelb/mf/client/sync/check/FileItem.java
+++ b/src/main/java/unimelb/mf/client/sync/check/FileItem.java
@@ -1,76 +1,76 @@
-package unimelb.mf.client.sync.check;
-
-import java.nio.file.Path;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-import unimelb.utils.PathUtils;
-import unimelb.utils.ChecksumUtils.ChecksumType;
-public class FileItem implements Item {
-
-
-    private Path _baseDir;
-
-    private Path _file;
-
-    private long _length = -1;
-
-    private Map<ChecksumType, String> _checksums;
-
-    FileItem(Path file, Path baseDir) {
-        _file = file.toAbsolutePath();
-        _baseDir = baseDir.toAbsolutePath();
-        _length = file.toFile().length();
-    }
-
-    protected void setChecksum(ChecksumType checksumType, String checksum) {
-        if (_checksums == null) {
-            _checksums = new LinkedHashMap<ChecksumType, String>();
-        }
-        _checksums.put(checksumType, checksum);
-    }
-
-    protected void setBaseDirectory(Path baseDir) {
-        _baseDir = baseDir;
-    }
-
-    @Override
-    public final long length() {
-        if (_length < 0) {
-            _length = _file.toFile().length();
-        }
-        return _length;
-    }
-
-    @Override
-    public final Map<ChecksumType, String> checksums() {
-        if (_checksums != null) {
-            return Collections.unmodifiableMap(_checksums);
-        }
-        return null;
-    }
-
-    @Override
-    public final String fullPath() {
-        return PathUtils.toSystemIndependent(_file.toAbsolutePath().toString());
-    }
-
-    @Override
-    public final String basePath() {
-        if (_baseDir != null) {
-            return PathUtils.toSystemIndependent(_baseDir.toAbsolutePath().toString());
-        }
-        return null;
-    }
-
-    public final Path baseDirectory() {
-        return _baseDir;
-    }
-
-    @Override
-    public final Type type() {
-        return Type.FILE;
-    }
-
-}
+package unimelb.mf.client.sync.check;
+
+import java.nio.file.Path;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import unimelb.utils.PathUtils;
+import unimelb.utils.ChecksumUtils.ChecksumType;
+public class FileItem implements Item {
+
+
+    private Path _baseDir;
+
+    private Path _file;
+
+    private long _length = -1;
+
+    private Map<ChecksumType, String> _checksums;
+
+    FileItem(Path file, Path baseDir) {
+        _file = file.toAbsolutePath();
+        _baseDir = baseDir.toAbsolutePath();
+        _length = file.toFile().length();
+    }
+
+    protected void setChecksum(ChecksumType checksumType, String checksum) {
+        if (_checksums == null) {
+            _checksums = new LinkedHashMap<ChecksumType, String>();
+        }
+        _checksums.put(checksumType, checksum);
+    }
+
+    protected void setBaseDirectory(Path baseDir) {
+        _baseDir = baseDir;
+    }
+
+    @Override
+    public final long length() {
+        if (_length < 0) {
+            _length = _file.toFile().length();
+        }
+        return _length;
+    }
+
+    @Override
+    public final Map<ChecksumType, String> checksums() {
+        if (_checksums != null) {
+            return Collections.unmodifiableMap(_checksums);
+        }
+        return null;
+    }
+
+    @Override
+    public final String fullPath() {
+        return PathUtils.toSystemIndependent(_file.toAbsolutePath().toString());
+    }
+
+    @Override
+    public final String basePath() {
+        if (_baseDir != null) {
+            return PathUtils.toSystemIndependent(_baseDir.toAbsolutePath().toString());
+        }
+        return null;
+    }
+
+    public final Path baseDirectory() {
+        return _baseDir;
+    }
+
+    @Override
+    public final Type type() {
+        return Type.FILE;
+    }
+
+}
diff --git a/src/main/java/unimelb/mf/client/sync/check/HasChecksum.java b/src/main/java/unimelb/mf/client/sync/check/HasChecksum.java
index 9a8481c8f1c054058aa4d96f610da829becad97f..fa429fabfd9e4e9a3d1d531b77c8ee361ddb69c6 100644
--- a/src/main/java/unimelb/mf/client/sync/check/HasChecksum.java
+++ b/src/main/java/unimelb/mf/client/sync/check/HasChecksum.java
@@ -1,48 +1,48 @@
-package unimelb.mf.client.sync.check;
-
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-import unimelb.utils.ChecksumUtils.ChecksumType;
-public interface HasChecksum {
-
-	default String checksum(ChecksumType type) {
-		Map<ChecksumType, String> checksums = checksums();
-		if (checksums != null) {
-			return checksums.get(type);
-		}
-
-		return null;
-	}
-
-	default boolean checksumEquals(ChecksumType type, HasChecksum o) {
-		if (o != null) {
-			String csum1 = checksum(type);
-			String csum2 = o.checksum(type);
-			return (csum1 != null && csum2 != null && csum1.equalsIgnoreCase(csum2));
-		}
-		return false;
-	}
-
-	default boolean anyChecksumMatches(HasChecksum o) {
-		Map<ChecksumType, String> checksums1 = checksums();
-		Map<ChecksumType, String> checksums2 = o.checksums();
-		if (checksums1 != null && checksums2 != null) {
-			Set<ChecksumType> types1 = checksums1.keySet();
-			Set<ChecksumType> types2 = checksums2.keySet();
-			Set<ChecksumType> types = new LinkedHashSet<ChecksumType>(types1);
-			types.retainAll(types2);
-			if (!types.isEmpty()) {
-				for (ChecksumType type : types) {
-					String checksum1 = checksums1.get(type);
-					String checksum2 = checksums2.get(type);
-					return checksum1 != null && checksum2 != null && checksum1.equalsIgnoreCase(checksum2);
-				}
-			}
-		}
-		return false;
-	}
-
-	Map<ChecksumType, String> checksums();
-
-}
+package unimelb.mf.client.sync.check;
+
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import unimelb.utils.ChecksumUtils.ChecksumType;
+public interface HasChecksum {
+
+	default String checksum(ChecksumType type) {
+		Map<ChecksumType, String> checksums = checksums();
+		if (checksums != null) {
+			return checksums.get(type);
+		}
+
+		return null;
+	}
+
+	default boolean checksumEquals(ChecksumType type, HasChecksum o) {
+		if (o != null) {
+			String csum1 = checksum(type);
+			String csum2 = o.checksum(type);
+			return (csum1 != null && csum2 != null && csum1.equalsIgnoreCase(csum2));
+		}
+		return false;
+	}
+
+	default boolean anyChecksumMatches(HasChecksum o) {
+		Map<ChecksumType, String> checksums1 = checksums();
+		Map<ChecksumType, String> checksums2 = o.checksums();
+		if (checksums1 != null && checksums2 != null) {
+			Set<ChecksumType> types1 = checksums1.keySet();
+			Set<ChecksumType> types2 = checksums2.keySet();
+			Set<ChecksumType> types = new LinkedHashSet<ChecksumType>(types1);
+			types.retainAll(types2);
+			if (!types.isEmpty()) {
+				for (ChecksumType type : types) {
+					String checksum1 = checksums1.get(type);
+					String checksum2 = checksums2.get(type);
+					return checksum1 != null && checksum2 != null && checksum1.equalsIgnoreCase(checksum2);
+				}
+			}
+		}
+		return false;
+	}
+
+	Map<ChecksumType, String> checksums();
+
+}
diff --git a/src/main/java/unimelb/mf/client/sync/check/HasContent.java b/src/main/java/unimelb/mf/client/sync/check/HasContent.java
index 28da5cf6a18701663fa530d7c162831e82cbd827..19699fbbecea9ef665c06ccf7c502b62cae70637 100644
--- a/src/main/java/unimelb/mf/client/sync/check/HasContent.java
+++ b/src/main/java/unimelb/mf/client/sync/check/HasContent.java
@@ -1,15 +1,15 @@
-package unimelb.mf.client.sync.check;
-
-import unimelb.utils.ChecksumUtils;
-
-public interface HasContent extends HasLength, HasChecksum {
-
-	default boolean contentEquals(HasContent o, ChecksumUtils.ChecksumType checksumType) {
-		if (checksumType != null) {
-			return lengthEquals(o) && checksumEquals(checksumType, o);
-		} else {
-			return lengthEquals(o);
-		}
-	}
-
-}
+package unimelb.mf.client.sync.check;
+
+import unimelb.utils.ChecksumUtils;
+
+public interface HasContent extends HasLength, HasChecksum {
+
+	default boolean contentEquals(HasContent o, ChecksumUtils.ChecksumType checksumType) {
+		if (checksumType != null) {
+			return lengthEquals(o) && checksumEquals(checksumType, o);
+		} else {
+			return lengthEquals(o);
+		}
+	}
+
+}
diff --git a/src/main/java/unimelb/mf/client/sync/check/HasContextPath.java b/src/main/java/unimelb/mf/client/sync/check/HasContextPath.java
index 7f9bd8e93d86201a37a2a91039c7f0d3479f3e79..d1aabdca4e5738a1c5dc819d0b248ac81f50d9fc 100644
--- a/src/main/java/unimelb/mf/client/sync/check/HasContextPath.java
+++ b/src/main/java/unimelb/mf/client/sync/check/HasContextPath.java
@@ -1,28 +1,28 @@
-package unimelb.mf.client.sync.check;
-
-import unimelb.utils.PathUtils;
-
-public interface HasContextPath {
-
-	String fullPath();
-
-	default String relativePath() {
-		String base = basePath();
-		String path = fullPath();
-		if (base != null && path != null) {
-			return PathUtils.getRelativePathSI(basePath(), fullPath());
-		}
-		return null;
-	}
-	
-	default String name() {
-		String path = fullPath();
-		if(path!=null) {
-			return PathUtils.getFileName(path);
-		}
-		return null;
-	}
-
-	String basePath();
-
-}
+package unimelb.mf.client.sync.check;
+
+import unimelb.utils.PathUtils;
+
+public interface HasContextPath {
+
+	String fullPath();
+
+	default String relativePath() {
+		String base = basePath();
+		String path = fullPath();
+		if (base != null && path != null) {
+			return PathUtils.getRelativePathSI(basePath(), fullPath());
+		}
+		return null;
+	}
+	
+	default String name() {
+		String path = fullPath();
+		if(path!=null) {
+			return PathUtils.getFileName(path);
+		}
+		return null;
+	}
+
+	String basePath();
+
+}
diff --git a/src/main/java/unimelb/mf/client/sync/check/HasLength.java b/src/main/java/unimelb/mf/client/sync/check/HasLength.java
index 78ec1029971260464a6b625be9c7817bc2ca6056..da412bf9ed9e85005642560ff5ff9a4995fd8b91 100644
--- a/src/main/java/unimelb/mf/client/sync/check/HasLength.java
+++ b/src/main/java/unimelb/mf/client/sync/check/HasLength.java
@@ -1,16 +1,16 @@
-package unimelb.mf.client.sync.check;
-
-public interface HasLength {
-
-	long length();
-
-	default boolean lengthEquals(HasLength o) {
-		if (o != null) {
-			if (length() >= 0 && o.length() >= 0) {
-				return length() == o.length();
-			}
-		}
-		return false;
-	}
-
-}
+package unimelb.mf.client.sync.check;
+
+public interface HasLength {
+
+	long length();
+
+	default boolean lengthEquals(HasLength o) {
+		if (o != null) {
+			if (length() >= 0 && o.length() >= 0) {
+				return length() == o.length();
+			}
+		}
+		return false;
+	}
+
+}
diff --git a/src/main/java/unimelb/mf/client/sync/check/Item.java b/src/main/java/unimelb/mf/client/sync/check/Item.java
index 8ed66bc5c68cfdd14b39f30076ef3312fe8477dc..6aaf6f91df15639b6d868cf476270fb891cf0bf2 100644
--- a/src/main/java/unimelb/mf/client/sync/check/Item.java
+++ b/src/main/java/unimelb/mf/client/sync/check/Item.java
@@ -1,14 +1,14 @@
-package unimelb.mf.client.sync.check;
-
-public interface Item extends HasContent, HasContextPath {
-
-    public static enum Type {
-        ASSET, FILE
-    }
-
-    Type type();
-
-    default String typeName() {
-        return type().name().toLowerCase();
-    }
-}
+package unimelb.mf.client.sync.check;
+
+public interface Item extends HasContent, HasContextPath {
+
+    public static enum Type {
+        ASSET, FILE
+    }
+
+    Type type();
+
+    default String typeName() {
+        return type().name().toLowerCase();
+    }
+}
diff --git a/src/main/java/unimelb/mf/client/sync/settings/Settings.java b/src/main/java/unimelb/mf/client/sync/settings/Settings.java
index 212f71f1fa740092e818de7a41037e645560a981..e32458522ad5d5c99a39d06bc7a74f7491e6bb10 100644
--- a/src/main/java/unimelb/mf/client/sync/settings/Settings.java
+++ b/src/main/java/unimelb/mf/client/sync/settings/Settings.java
@@ -1,714 +1,714 @@
-package unimelb.mf.client.sync.settings;
-
-import java.io.BufferedReader;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.Reader;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-import arc.xml.XmlDoc;
-import unimelb.mf.client.session.MFSession;
-import unimelb.mf.client.sync.check.CheckHandler;
-import unimelb.mf.client.sync.task.AssetDownloadTask;
-import unimelb.mf.client.sync.task.AssetDownloadTask.Unarchive;
-import unimelb.mf.client.task.MFApp;
-
-public class Settings implements MFApp.Settings {
-
-	public static final int DEFAULT_DAEMON_LISTENER_PORT = 9761;
-	public static final long DEFAULT_DAEMON_SCAN_INTERVAL = 60000L;
-	public static final int DEFAULT_NUM_OF_QUERIERS = 1;
-	public static final int DEFAULT_NUM_OF_WORKERS = 1;
-	public static final int DEFAULT_BATCH_SIZE = 1000;
-	public static final int DEFAULT_MAX_RETRIES = 0;
-	public static final int DEFAULT_LOG_FILE_SIZE_MB = 100; // 100MB
-	public static final int MIN_LOG_FILE_SIZE_MB = 1;// 1MB
-	public static final int MAX_LOG_FILE_SIZE_MB = 2000;// 2000MB
-	public static final int DEFAULT_LOG_FILE_COUNT = 2;
-	public static final int MIN_LOG_FILE_COUNT = 1;
-	public static final int MAX_LOG_FILE_COUNT = 10;
-	public static final long DEFAULT_AGGREGATE_UPLOAD_THRESHOLD = 1000000L;
-	public static final long DEFAULT_AGGREGATE_DOWNLOAD_THRESHOLD = 1000000L;
-	public static final long DEFAULT_SPLIT_THRESHOLD = 1000000000L; // 1GB
-
-	private List<Job> _jobs;
-	private Path _logDir = null;
-	private int _logFileSizeMB = DEFAULT_LOG_FILE_SIZE_MB;
-	private int _logFileCount = DEFAULT_LOG_FILE_COUNT;
-
-	private int _nbQueriers = DEFAULT_NUM_OF_QUERIERS;
-	private int _nbWorkers = DEFAULT_NUM_OF_WORKERS;
-
-	private boolean _daemon = false;
-	private int _daemonListenerPort = DEFAULT_DAEMON_LISTENER_PORT;
-	private long _daemonScanInterval = DEFAULT_DAEMON_SCAN_INTERVAL;
-
-	private int _batchSize = DEFAULT_BATCH_SIZE;
-
-	private boolean _createNamespaces;
-	private boolean _csumCheck;
-
-	private boolean _includeMetadata = false; // download metadata xml
-
-	private long _aggregateDownloadThreshold = 0L;
-
-	private long _aggregateUploadThreshold = 0L;
-
-	private long _splitThreshold = Long.MAX_VALUE;
-
-	private int _maxRetries = DEFAULT_MAX_RETRIES; // Number of retries...
-
-	/*
-	 * download settings
-	 */
-	private boolean _overwrite = false;
-	private AssetDownloadTask.Unarchive _unarchive = AssetDownloadTask.Unarchive.NONE;
-
-	/*
-	 * upload settings;
-	 */
-	private boolean _excludeEmptyFolder = true;
-
-	/*
-	 * check settings
-	 */
-	private CheckHandler _checkHandler = null;
-
-	private boolean _verbose = false;
-
-	private Set<String> _recipients;
-
-	private boolean _deleteFiles = false;
-
-	private boolean _deleteAssets = false;
-
-	private boolean _hardDestroyAssets = false;
-
-	// follow symbolic links when uploading
-	private boolean _followSymlinks = false;
-
-	// try restoring symbolic links when downloading
-	private boolean _restoreSymlinks = true;
-
-	// exclude parent if the source path ends with trailing slash
-	private boolean _excludeParent = false;
-
-	private boolean _worm = false;
-
-	private boolean _wormCanAddVersions = false;
-
-	private boolean _wormCanMove = true;
-
-	public Settings() {
-		_jobs = new ArrayList<Job>();
-	}
-
-	public int batchSize() {
-		return _batchSize;
-	}
-
-	public void setBatchSize(int batchSize) {
-		if (batchSize < 1) {
-			batchSize = DEFAULT_BATCH_SIZE;
-		}
-		_batchSize = batchSize;
-	}
-
-	public Path logDirectory() {
-		return _logDir;
-	}
-
-	public void setLogDirectory(Path logDir) {
-		_logDir = logDir;
-	}
-
-	public List<Job> jobs() {
-		if (_jobs != null && !_jobs.isEmpty()) {
-			return Collections.unmodifiableList(_jobs);
-		}
-		return null;
-	}
-
-	public void addJob(Job... jobs) {
-		if (_jobs == null) {
-			_jobs = new ArrayList<Job>();
-		}
-		if (jobs != null && jobs.length > 0) {
-			for (Job job : jobs) {
-				_jobs.add(job);
-			}
-		}
-	}
-
-	public void clearJobs() {
-		if (_jobs != null) {
-			_jobs.clear();
-		}
-	}
-
-	public void setJobs(Collection<Job> jobs) {
-		clearJobs();
-		if (jobs != null) {
-			for (Job job : jobs) {
-				addJob(job);
-			}
-		}
-	}
-
-	public boolean daemon() {
-		return _daemon;
-	}
-
-	public void setDaemon(boolean daemon) {
-		_daemon = daemon;
-	}
-
-	public int daemonListenerPort() {
-		return _daemonListenerPort;
-	}
-
-	public void setDaemonListenerPort(int port) {
-		_daemonListenerPort = port;
-	}
-
-	public int numberOfWorkers() {
-		return _nbWorkers;
-	}
-
-	public void setNumberOfWorkers(int nbWorkers) {
-		if (nbWorkers <= 1) {
-			_nbWorkers = 1;
-		} else {
-			if (nbWorkers > MAX_NUM_OF_WORKERS) {
-				_nbWorkers = MAX_NUM_OF_WORKERS;
-			} else {
-				_nbWorkers = nbWorkers;
-			}
-		}
-	}
-
-	public int numberOfQueriers() {
-		return _nbQueriers;
-	}
-
-	public void setNumberOfQueriers(int nbQueriers) {
-		if (nbQueriers <= 1) {
-			_nbQueriers = 1;
-		} else {
-			if (nbQueriers > MAX_NUM_OF_QUERIERS) {
-				_nbQueriers = MAX_NUM_OF_QUERIERS;
-			} else {
-				_nbQueriers = nbQueriers;
-			}
-		}
-	}
-
-	public void compile(MFSession session) throws Throwable {
-		// TODO
-	}
-
-	// public void validate(MFSession session) throws Throwable {
-	// for (Job job : _jobs) {
-	// /*
-	// * check if namespace is specified
-	// */
-	// if (job.namespace() == null) {
-	// throw new IllegalArgumentException("Asset namespace is null.", new
-	// NullPointerException());
-	// }
-	// /*
-	// * check if parent namespace exists
-	// */
-	// String parentNS = job.parentNamespace();
-	// boolean parentNSExists =
-	// !AssetNamespaceUtils.assetNamespaceExists(session,
-	// PathUtils.getParentPath(parentNS));
-	// if (!parentNSExists) {
-	// throw new IllegalArgumentException("Asset namespace: '" + parentNS + "'
-	// does not exist.");
-	// }
-	//
-	// /*
-	// * check if directory is specified
-	// */
-	// if (job.directory() == null) {
-	// throw new IllegalArgumentException("Source directory is null.", new
-	// NullPointerException());
-	// }
-	// /*
-	// * check if parent directory exists
-	// */
-	// Path parentDir = job.directory().toAbsolutePath().getParent();
-	// if (!Files.isDirectory(parentDir)) {
-	// throw new IllegalArgumentException("'" + parentDir + "' does not exist or
-	// it is not a directory.");
-	// }
-	// }
-	// }
-
-	public boolean createNamespaces() {
-		return _createNamespaces;
-	}
-
-	public void setCreateNamespaces(boolean createNamespaces) {
-		_createNamespaces = createNamespaces;
-	}
-
-	public boolean csumCheck() {
-		return _csumCheck;
-	}
-
-	public void setCsumCheck(boolean csumCheck) {
-		_csumCheck = csumCheck;
-	}
-
-	/**
-	 * Currently for download only.
-	 *
-	 * @return
-	 */
-	public boolean overwrite() {
-		return _overwrite;
-	}
-
-	/**
-	 * Currently for download only.
-	 *
-	 * @param overwrite
-	 */
-	public void setOverwrite(boolean overwrite) {
-		_overwrite = overwrite;
-	}
-
-	/**
-	 * Currently for download only.
-	 *
-	 * @return
-	 */
-	public AssetDownloadTask.Unarchive unarchive() {
-		return _unarchive;
-	}
-
-	/**
-	 * Currently for download only.
-	 *
-	 * @param unarchive
-	 */
-	public void setUnarchive(AssetDownloadTask.Unarchive unarchive) {
-		_unarchive = unarchive;
-	}
-
-	/**
-	 * For check only.
-	 *
-	 * @return
-	 */
-	public CheckHandler checkHandler() {
-		return _checkHandler;
-	}
-
-	/**
-	 * For check only.
-	 *
-	 * @param ch
-	 */
-	public void setCheckHandler(CheckHandler ch) {
-		_checkHandler = ch;
-	}
-
-	public int retry() {
-		return _maxRetries;
-	}
-
-	public void setMaxRetries(int retry) {
-		if (retry < 0) {
-			_maxRetries = 0;
-		}
-		_maxRetries = retry;
-	}
-
-	public boolean excludeEmptyFolder() {
-		return _excludeEmptyFolder;
-	}
-
-	public void setExcludeEmptyFolder(boolean excludeEmptyFolder) {
-		_excludeEmptyFolder = excludeEmptyFolder;
-	}
-
-	public boolean verbose() {
-		return _verbose;
-	}
-
-	public void setVerbose(boolean verbose) {
-		_verbose = verbose;
-	}
-
-	public long daemonScanInterval() {
-		return _daemonScanInterval;
-	}
-
-	public void setDaemonScanInterval(long interval) {
-		_daemonScanInterval = interval;
-	}
-
-	public boolean hasRecipients() {
-		return _recipients != null && !_recipients.isEmpty();
-	}
-
-	public Collection<String> recipients() {
-		return _recipients == null ? null : Collections.unmodifiableCollection(_recipients);
-	}
-
-	public void addRecipients(String... emails) {
-		if (emails != null && emails.length > 0) {
-			if (_recipients == null) {
-				_recipients = new LinkedHashSet<String>();
-			}
-			for (String email : emails) {
-				_recipients.add(email.toLowerCase());
-			}
-		}
-	}
-
-	public boolean hasJobs() {
-		return _jobs != null && !_jobs.isEmpty();
-	}
-
-	public boolean hasOnlyCheckJobs() {
-		if (hasJobs()) {
-			for (Job job : _jobs) {
-				if (job.action().type() != Action.Type.CHECK) {
-					return false;
-				}
-			}
-		}
-		return true;
-	}
-
-	public void loadFromXml(XmlDoc.Element pe, String source, MFSession session) throws Throwable {
-		String src = (source == null || source.isEmpty()) ? "" : "(" + source + ")";
-		XmlDoc.Element se = pe.element("sync/settings");
-		if (se == null) {
-			throw new Exception(
-					"Failed to parse configuration" + src + ": Element properties/sync/settings is not found.");
-		}
-
-		if (se.elementExists("numberOfQueriers")) {
-			int nbQueriers = se.intValue("numberOfQueriers");
-			if (nbQueriers <= 0 || nbQueriers > Settings.MAX_NUM_OF_WORKERS) {
-				throw new IllegalArgumentException(
-						"Failed to parse numberOfQueriers from XML configuration. Expects integer between 0 and "
-								+ Settings.MAX_NUM_OF_QUERIERS + ". Found: " + nbQueriers);
-			}
-			setNumberOfQueriers(nbQueriers);
-		}
-		if (se.elementExists("numberOfWorkers")) {
-			int nbWorkers = se.intValue("numberOfWorkers");
-			if (nbWorkers <= 0 || nbWorkers > Settings.MAX_NUM_OF_WORKERS) {
-				throw new IllegalArgumentException(
-						"Failed to parse numberOfWorkers from XML configuration. Expects integer between 0 and "
-								+ Settings.MAX_NUM_OF_WORKERS + ". Found: " + nbWorkers);
-			}
-			setNumberOfWorkers(nbWorkers);
-		}
-		if (se.elementExists("batchSize")) {
-			setBatchSize(se.intValue("batchSize"));
-		}
-		if (se.elementExists("maxRetries")) {
-			setMaxRetries(se.intValue("maxRetries"));
-		}
-		if (se.elementExists("csumCheck")) {
-			setCsumCheck(se.booleanValue("csumCheck", false));
-		}
-		if (se.elementExists("followSymlinks")) {
-			setFollowSymlinks(se.booleanValue("followSymlinks", false));
-		}
-		if (se.elementExists("restoreSymlinks")) {
-			setRestoreSymlinks(se.booleanValue("restoreSymlinks", true));
-		}
-		if (se.elementExists("overwrite")) {
-			setOverwrite(se.booleanValue("overwrite", false));
-		}
-		if (se.elementExists("unarchive")) {
-			setUnarchive(Unarchive.fromString(se.value("unarchive")));
-		}
-		if (se.elementExists("daemon")) {
-			setDaemon(se.booleanValue("daemon/@enabled", false));
-			if (se.elementExists("daemon/listenerPort")) {
-				setDaemonListenerPort(se.intValue("daemon/listenerPort"));
-			}
-			if (se.elementExists("daemon/scanInterval")) {
-				setDaemonScanInterval(se.intValue("daemon/scanInterval"));
-			}
-		}
-		if (se.elementExists("worm")) {
-			setWorm(se.booleanValue("worm/enable", false));
-			setWormCanAddVersions(se.booleanValue("worm/canAddVersions", false));
-			setWormCanMove(se.booleanValue("worm/canMove", true));
-		}
-		if (se.elementExists("logDirectory")) {
-			Path logDir = Paths.get(se.value("logDirectory"));
-			if (!Files.exists(logDir)) {
-				throw new FileNotFoundException(logDir.toString());
-			}
-			if (!Files.isDirectory(logDir)) {
-				throw new Exception(logDir.toString() + " is not a directory!");
-			}
-			setLogDirectory(logDir);
-		}
-		if (se.elementExists("logFileSizeMB")) {
-			setLogFileSizeMB(se.intValue("logFileSizeMB"));
-		}
-		if (se.elementExists("logFileCount")) {
-			setLogFileCount(se.intValue("logFileCount"));
-		}
-		if (se.elementExists("excludeEmptyFolder")) {
-			setExcludeEmptyFolder(se.booleanValue("excludeEmptyFolder"));
-		}
-		if (se.elementExists("verbose")) {
-			setVerbose(se.booleanValue("verbose"));
-		}
-		if (se.elementExists("notification/email")) {
-			Collection<String> emails = se.values("notification/email");
-			if (emails != null) {
-				for (String email : emails) {
-					addRecipients(email);
-				}
-			}
-		}
-
-		// Add jobs
-		List<XmlDoc.Element> jes = pe.elements("sync/job");
-		if (jes != null) {
-			for (XmlDoc.Element je : jes) {
-				addJob(Job.parse(je, source, session));
-			}
-		}
-	}
-
-	public void loadFromXmlFile(Path xmlFile, MFSession session) throws Throwable {
-		if (xmlFile != null) {
-			Reader r = new BufferedReader(new FileReader(xmlFile.toFile()));
-			try {
-				XmlDoc.Element pe = new XmlDoc().parse(r);
-				if (pe == null) {
-					throw new IllegalArgumentException("Failed to parse configuration XML file: " + xmlFile);
-				}
-				loadFromXml(pe, xmlFile.toString(), session);
-			} finally {
-				r.close();
-			}
-		}
-	}
-
-	public boolean hasDownloadJobs() {
-		if (_jobs != null && !_jobs.isEmpty()) {
-			for (Job job : _jobs) {
-				if (job.action() == Action.DOWNLOAD) {
-					return true;
-				}
-			}
-		}
-		return false;
-	}
-
-	public boolean hasUploadJobs() {
-		if (_jobs != null && !_jobs.isEmpty()) {
-			for (Job job : _jobs) {
-				if (job.action() == Action.UPLOAD) {
-					return true;
-				}
-			}
-		}
-		return false;
-	}
-
-	/**
-	 * Delete local files.
-	 *
-	 * @return
-	 */
-	public boolean deleteFiles() {
-		return _deleteFiles;
-	}
-
-	public void setDeleteFiles(boolean deleteFiles) {
-		_deleteFiles = deleteFiles;
-	}
-
-	/**
-	 * Destroy remote assets.
-	 *
-	 * @return
-	 */
-	public boolean deleteAssets() {
-		return _deleteAssets;
-	}
-
-	public void setDeleteAssets(boolean deleteAssets) {
-		_deleteAssets = deleteAssets;
-	}
-
-	public boolean hardDestroyAssets() {
-		return _hardDestroyAssets;
-	}
-
-	public void setHardDestroyAssets(boolean hardDestroy) {
-		_hardDestroyAssets = hardDestroy;
-	}
-
-	public boolean needToDeleteFiles() {
-		return (hasDownloadJobs()) && deleteFiles();
-	}
-
-	public boolean needToDeleteAssets() {
-		return (hasUploadJobs()) && deleteAssets();
-	}
-
-	public boolean followSymlinks() {
-		return _followSymlinks;
-	}
-
-	public void setFollowSymlinks(boolean followSymlinks) {
-		_followSymlinks = followSymlinks;
-	}
-
-	/**
-	 * restore symbolic links when downloading
-	 *
-	 * @return
-	 */
-	public boolean restoreSymlinks() {
-		return _restoreSymlinks;
-	}
-
-	/**
-	 * set whether or not to restore symbolic links when downloading.
-	 *
-	 * @param restoreSymlinks
-	 */
-	public void setRestoreSymlinks(boolean restoreSymlinks) {
-		_restoreSymlinks = restoreSymlinks;
-	}
-
-	public int logFileSizeMB() {
-		return _logFileSizeMB;
-	}
-
-	public void setLogFileSizeMB(int fileSizeMB) {
-		if (fileSizeMB < MIN_LOG_FILE_SIZE_MB || fileSizeMB > MAX_LOG_FILE_SIZE_MB) {
-			throw new IllegalArgumentException("Expects integer value between " + MIN_LOG_FILE_SIZE_MB + " and "
-					+ MAX_LOG_FILE_SIZE_MB + ". Found " + fileSizeMB);
-		}
-		_logFileSizeMB = fileSizeMB;
-	}
-
-	public int logFileCount() {
-		return _logFileCount;
-	}
-
-	public void setLogFileCount(int fileCount) {
-		if (fileCount < MIN_LOG_FILE_COUNT || fileCount > MAX_LOG_FILE_COUNT) {
-			throw new IllegalArgumentException("Expects integer value between " + MIN_LOG_FILE_COUNT + " and "
-					+ MAX_LOG_FILE_COUNT + ". Found " + fileCount);
-		}
-		_logFileCount = fileCount;
-	}
-
-	public boolean excludeParent() {
-		return _excludeParent;
-	}
-
-	public void setExcludeParent(boolean excludeParent) {
-		_excludeParent = excludeParent;
-	}
-
-	public boolean worm() {
-		return _worm;
-	}
-
-	public void setWorm(boolean worm) {
-		_worm = worm;
-	}
-
-	public boolean wormCanAddVersions() {
-		return _wormCanAddVersions;
-	}
-
-	public void setWormCanAddVersions(boolean wormCanAddVersions) {
-		_wormCanAddVersions = wormCanAddVersions;
-	}
-
-	public boolean wormCanMove() {
-		return _wormCanMove;
-	}
-
-	public void setWormCanMove(boolean wormCanMove) {
-		_wormCanMove = wormCanMove;
-	}
-
-	public boolean includeMetadata() {
-		return _includeMetadata;
-	}
-
-	public void setIncludeMetadata(boolean includeMetadata) {
-		_includeMetadata = includeMetadata;
-	}
-
-	public long aggregateDownloadThreshold() {
-		return _aggregateDownloadThreshold;
-	}
-
-	public void setAggregateDownloadThreshold(Long aggregateDownloadThreshold) {
-		_aggregateDownloadThreshold = aggregateDownloadThreshold;
-	}
-
-	public void enableAggregateDownload() {
-		setAggregateDownloadThreshold(DEFAULT_AGGREGATE_DOWNLOAD_THRESHOLD);
-	}
-
-	public void disableAggregateDownload() {
-		setAggregateDownloadThreshold(0L);
-	}
-
-	public long aggregateUploadThreshold() {
-		return _aggregateUploadThreshold;
-	}
-
-	public void setAggregateUploadThreshold(Long aggregateUploadThreshold) {
-		_aggregateUploadThreshold = aggregateUploadThreshold;
-	}
-
-	public void enableAggregateUpload() {
-		setAggregateUploadThreshold(DEFAULT_AGGREGATE_UPLOAD_THRESHOLD);
-	}
-
-	public void disableAggregateUpload() {
-		setAggregateUploadThreshold(0L);
-	}
-
-	public long splitThreshold() {
-		return _splitThreshold;
-	}
-
-	public void setSplitThreshold(long splitThreshold) {
-		_splitThreshold = splitThreshold;
-	}
-
-	public void enableSplit() {
-		_splitThreshold = DEFAULT_SPLIT_THRESHOLD;
-	}
-
-	public void disableSplit() {
-		_splitThreshold = 0;
-	}
-
-}
+package unimelb.mf.client.sync.settings;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.Reader;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+import arc.xml.XmlDoc;
+import unimelb.mf.client.session.MFSession;
+import unimelb.mf.client.sync.check.CheckHandler;
+import unimelb.mf.client.sync.task.AssetDownloadTask;
+import unimelb.mf.client.sync.task.AssetDownloadTask.Unarchive;
+import unimelb.mf.client.task.MFApp;
+
+public class Settings implements MFApp.Settings {
+
+	public static final int DEFAULT_DAEMON_LISTENER_PORT = 9761;
+	public static final long DEFAULT_DAEMON_SCAN_INTERVAL = 60000L;
+	public static final int DEFAULT_NUM_OF_QUERIERS = 1;
+	public static final int DEFAULT_NUM_OF_WORKERS = 1;
+	public static final int DEFAULT_BATCH_SIZE = 1000;
+	public static final int DEFAULT_MAX_RETRIES = 0;
+	public static final int DEFAULT_LOG_FILE_SIZE_MB = 100; // 100MB
+	public static final int MIN_LOG_FILE_SIZE_MB = 1;// 1MB
+	public static final int MAX_LOG_FILE_SIZE_MB = 2000;// 2000MB
+	public static final int DEFAULT_LOG_FILE_COUNT = 2;
+	public static final int MIN_LOG_FILE_COUNT = 1;
+	public static final int MAX_LOG_FILE_COUNT = 10;
+	public static final long DEFAULT_AGGREGATE_UPLOAD_THRESHOLD = 1000000L;
+	public static final long DEFAULT_AGGREGATE_DOWNLOAD_THRESHOLD = 1000000L;
+	public static final long DEFAULT_SPLIT_THRESHOLD = 1000000000L; // 1GB
+
+	private List<Job> _jobs;
+	private Path _logDir = null;
+	private int _logFileSizeMB = DEFAULT_LOG_FILE_SIZE_MB;
+	private int _logFileCount = DEFAULT_LOG_FILE_COUNT;
+
+	private int _nbQueriers = DEFAULT_NUM_OF_QUERIERS;
+	private int _nbWorkers = DEFAULT_NUM_OF_WORKERS;
+
+	private boolean _daemon = false;
+	private int _daemonListenerPort = DEFAULT_DAEMON_LISTENER_PORT;
+	private long _daemonScanInterval = DEFAULT_DAEMON_SCAN_INTERVAL;
+
+	private int _batchSize = DEFAULT_BATCH_SIZE;
+
+	private boolean _createNamespaces;
+	private boolean _csumCheck;
+
+	private boolean _includeMetadata = false; // download metadata xml
+
+	private long _aggregateDownloadThreshold = 0L;
+
+	private long _aggregateUploadThreshold = 0L;
+
+	private long _splitThreshold = Long.MAX_VALUE;
+
+	private int _maxRetries = DEFAULT_MAX_RETRIES; // Number of retries...
+
+	/*
+	 * download settings
+	 */
+	private boolean _overwrite = false;
+	private AssetDownloadTask.Unarchive _unarchive = AssetDownloadTask.Unarchive.NONE;
+
+	/*
+	 * upload settings;
+	 */
+	private boolean _excludeEmptyFolder = true;
+
+	/*
+	 * check settings
+	 */
+	private CheckHandler _checkHandler = null;
+
+	private boolean _verbose = false;
+
+	private Set<String> _recipients;
+
+	private boolean _deleteFiles = false;
+
+	private boolean _deleteAssets = false;
+
+	private boolean _hardDestroyAssets = false;
+
+	// follow symbolic links when uploading
+	private boolean _followSymlinks = false;
+
+	// try restoring symbolic links when downloading
+	private boolean _restoreSymlinks = true;
+
+	// exclude parent if the source path ends with trailing slash
+	private boolean _excludeParent = false;
+
+	private boolean _worm = false;
+
+	private boolean _wormCanAddVersions = false;
+
+	private boolean _wormCanMove = true;
+
+	public Settings() {
+		_jobs = new ArrayList<Job>();
+	}
+
+	public int batchSize() {
+		return _batchSize;
+	}
+
+	public void setBatchSize(int batchSize) {
+		if (batchSize < 1) {
+			batchSize = DEFAULT_BATCH_SIZE;
+		}
+		_batchSize = batchSize;
+	}
+
+	public Path logDirectory() {
+		return _logDir;
+	}
+
+	public void setLogDirectory(Path logDir) {
+		_logDir = logDir;
+	}
+
+	public List<Job> jobs() {
+		if (_jobs != null && !_jobs.isEmpty()) {
+			return Collections.unmodifiableList(_jobs);
+		}
+		return null;
+	}
+
+	public void addJob(Job... jobs) {
+		if (_jobs == null) {
+			_jobs = new ArrayList<Job>();
+		}
+		if (jobs != null && jobs.length > 0) {
+			for (Job job : jobs) {
+				_jobs.add(job);
+			}
+		}
+	}
+
+	public void clearJobs() {
+		if (_jobs != null) {
+			_jobs.clear();
+		}
+	}
+
+	public void setJobs(Collection<Job> jobs) {
+		clearJobs();
+		if (jobs != null) {
+			for (Job job : jobs) {
+				addJob(job);
+			}
+		}
+	}
+
+	public boolean daemon() {
+		return _daemon;
+	}
+
+	public void setDaemon(boolean daemon) {
+		_daemon = daemon;
+	}
+
+	public int daemonListenerPort() {
+		return _daemonListenerPort;
+	}
+
+	public void setDaemonListenerPort(int port) {
+		_daemonListenerPort = port;
+	}
+
+	public int numberOfWorkers() {
+		return _nbWorkers;
+	}
+
+	public void setNumberOfWorkers(int nbWorkers) {
+		if (nbWorkers <= 1) {
+			_nbWorkers = 1;
+		} else {
+			if (nbWorkers > MAX_NUM_OF_WORKERS) {
+				_nbWorkers = MAX_NUM_OF_WORKERS;
+			} else {
+				_nbWorkers = nbWorkers;
+			}
+		}
+	}
+
+	public int numberOfQueriers() {
+		return _nbQueriers;
+	}
+
+	public void setNumberOfQueriers(int nbQueriers) {
+		if (nbQueriers <= 1) {
+			_nbQueriers = 1;
+		} else {
+			if (nbQueriers > MAX_NUM_OF_QUERIERS) {
+				_nbQueriers = MAX_NUM_OF_QUERIERS;
+			} else {
+				_nbQueriers = nbQueriers;
+			}
+		}
+	}
+
+	public void compile(MFSession session) throws Throwable {
+		// TODO
+	}
+
+	// public void validate(MFSession session) throws Throwable {
+	// for (Job job : _jobs) {
+	// /*
+	// * check if namespace is specified
+	// */
+	// if (job.namespace() == null) {
+	// throw new IllegalArgumentException("Asset namespace is null.", new
+	// NullPointerException());
+	// }
+	// /*
+	// * check if parent namespace exists
+	// */
+	// String parentNS = job.parentNamespace();
+	// boolean parentNSExists =
+	// !AssetNamespaceUtils.assetNamespaceExists(session,
+	// PathUtils.getParentPath(parentNS));
+	// if (!parentNSExists) {
+	// throw new IllegalArgumentException("Asset namespace: '" + parentNS + "'
+	// does not exist.");
+	// }
+	//
+	// /*
+	// * check if directory is specified
+	// */
+	// if (job.directory() == null) {
+	// throw new IllegalArgumentException("Source directory is null.", new
+	// NullPointerException());
+	// }
+	// /*
+	// * check if parent directory exists
+	// */
+	// Path parentDir = job.directory().toAbsolutePath().getParent();
+	// if (!Files.isDirectory(parentDir)) {
+	// throw new IllegalArgumentException("'" + parentDir + "' does not exist or
+	// it is not a directory.");
+	// }
+	// }
+	// }
+
+	public boolean createNamespaces() {
+		return _createNamespaces;
+	}
+
+	public void setCreateNamespaces(boolean createNamespaces) {
+		_createNamespaces = createNamespaces;
+	}
+
+	public boolean csumCheck() {
+		return _csumCheck;
+	}
+
+	public void setCsumCheck(boolean csumCheck) {
+		_csumCheck = csumCheck;
+	}
+
+	/**
+	 * Currently for download only.
+	 *
+	 * @return
+	 */
+	public boolean overwrite() {
+		return _overwrite;
+	}
+
+	/**
+	 * Currently for download only.
+	 *
+	 * @param overwrite
+	 */
+	public void setOverwrite(boolean overwrite) {
+		_overwrite = overwrite;
+	}
+
+	/**
+	 * Currently for download only.
+	 *
+	 * @return
+	 */
+	public AssetDownloadTask.Unarchive unarchive() {
+		return _unarchive;
+	}
+
+	/**
+	 * Currently for download only.
+	 *
+	 * @param unarchive
+	 */
+	public void setUnarchive(AssetDownloadTask.Unarchive unarchive) {
+		_unarchive = unarchive;
+	}
+
+	/**
+	 * For check only.
+	 *
+	 * @return
+	 */
+	public CheckHandler checkHandler() {
+		return _checkHandler;
+	}
+
+	/**
+	 * For check only.
+	 *
+	 * @param ch
+	 */
+	public void setCheckHandler(CheckHandler ch) {
+		_checkHandler = ch;
+	}
+
+	public int retry() {
+		return _maxRetries;
+	}
+
+	public void setMaxRetries(int retry) {
+		if (retry < 0) {
+			retry = 0;
+		}
+		_maxRetries = retry;
+	}
+
+	public boolean excludeEmptyFolder() {
+		return _excludeEmptyFolder;
+	}
+
+	public void setExcludeEmptyFolder(boolean excludeEmptyFolder) {
+		_excludeEmptyFolder = excludeEmptyFolder;
+	}
+
+	public boolean verbose() {
+		return _verbose;
+	}
+
+	public void setVerbose(boolean verbose) {
+		_verbose = verbose;
+	}
+
+	public long daemonScanInterval() {
+		return _daemonScanInterval;
+	}
+
+	public void setDaemonScanInterval(long interval) {
+		_daemonScanInterval = interval;
+	}
+
+	public boolean hasRecipients() {
+		return _recipients != null && !_recipients.isEmpty();
+	}
+
+	public Collection<String> recipients() {
+		return _recipients == null ? null : Collections.unmodifiableCollection(_recipients);
+	}
+
+	public void addRecipients(String... emails) {
+		if (emails != null && emails.length > 0) {
+			if (_recipients == null) {
+				_recipients = new LinkedHashSet<String>();
+			}
+			for (String email : emails) {
+				_recipients.add(email.toLowerCase());
+			}
+		}
+	}
+
+	public boolean hasJobs() {
+		return _jobs != null && !_jobs.isEmpty();
+	}
+
+	public boolean hasOnlyCheckJobs() {
+		if (hasJobs()) {
+			for (Job job : _jobs) {
+				if (job.action().type() != Action.Type.CHECK) {
+					return false;
+				}
+			}
+		}
+		return true;
+	}
+
+	public void loadFromXml(XmlDoc.Element pe, String source, MFSession session) throws Throwable {
+		String src = (source == null || source.isEmpty()) ? "" : "(" + source + ")";
+		XmlDoc.Element se = pe.element("sync/settings");
+		if (se == null) {
+			throw new Exception(
+					"Failed to parse configuration" + src + ": Element properties/sync/settings is not found.");
+		}
+
+		if (se.elementExists("numberOfQueriers")) {
+			int nbQueriers = se.intValue("numberOfQueriers");
+			if (nbQueriers <= 0 || nbQueriers > Settings.MAX_NUM_OF_QUERIERS) {
+				throw new IllegalArgumentException(
+						"Failed to parse numberOfQueriers from XML configuration. Expects integer between 1 and "
+								+ Settings.MAX_NUM_OF_QUERIERS + ". Found: " + nbQueriers);
+			}
+			setNumberOfQueriers(nbQueriers);
+		}
+		if (se.elementExists("numberOfWorkers")) {
+			int nbWorkers = se.intValue("numberOfWorkers");
+			if (nbWorkers <= 0 || nbWorkers > Settings.MAX_NUM_OF_WORKERS) {
+				throw new IllegalArgumentException(
+						"Failed to parse numberOfWorkers from XML configuration. Expects integer between 1 and "
+								+ Settings.MAX_NUM_OF_WORKERS + ". Found: " + nbWorkers);
+			}
+			setNumberOfWorkers(nbWorkers);
+		}
+		if (se.elementExists("batchSize")) {
+			setBatchSize(se.intValue("batchSize"));
+		}
+		if (se.elementExists("maxRetries")) {
+			setMaxRetries(se.intValue("maxRetries"));
+		}
+		if (se.elementExists("csumCheck")) {
+			setCsumCheck(se.booleanValue("csumCheck", false));
+		}
+		if (se.elementExists("followSymlinks")) {
+			setFollowSymlinks(se.booleanValue("followSymlinks", false));
+		}
+		if (se.elementExists("restoreSymlinks")) {
+			setRestoreSymlinks(se.booleanValue("restoreSymlinks", true));
+		}
+		if (se.elementExists("overwrite")) {
+			setOverwrite(se.booleanValue("overwrite", false));
+		}
+		if (se.elementExists("unarchive")) {
+			setUnarchive(Unarchive.fromString(se.value("unarchive")));
+		}
+		if (se.elementExists("daemon")) {
+			setDaemon(se.booleanValue("daemon/@enabled", false));
+			if (se.elementExists("daemon/listenerPort")) {
+				setDaemonListenerPort(se.intValue("daemon/listenerPort"));
+			}
+			if (se.elementExists("daemon/scanInterval")) {
+				setDaemonScanInterval(se.intValue("daemon/scanInterval"));
+			}
+		}
+		if (se.elementExists("worm")) {
+			setWorm(se.booleanValue("worm/enable", false));
+			setWormCanAddVersions(se.booleanValue("worm/canAddVersions", false));
+			setWormCanMove(se.booleanValue("worm/canMove", true));
+		}
+		if (se.elementExists("logDirectory")) {
+			Path logDir = Paths.get(se.value("logDirectory"));
+			if (!Files.exists(logDir)) {
+				throw new FileNotFoundException(logDir.toString());
+			}
+			if (!Files.isDirectory(logDir)) {
+				throw new Exception(logDir.toString() + " is not a directory!");
+			}
+			setLogDirectory(logDir);
+		}
+		if (se.elementExists("logFileSizeMB")) {
+			setLogFileSizeMB(se.intValue("logFileSizeMB"));
+		}
+		if (se.elementExists("logFileCount")) {
+			setLogFileCount(se.intValue("logFileCount"));
+		}
+		if (se.elementExists("excludeEmptyFolder")) {
+			setExcludeEmptyFolder(se.booleanValue("excludeEmptyFolder"));
+		}
+		if (se.elementExists("verbose")) {
+			setVerbose(se.booleanValue("verbose"));
+		}
+		if (se.elementExists("notification/email")) {
+			Collection<String> emails = se.values("notification/email");
+			if (emails != null) {
+				for (String email : emails) {
+					addRecipients(email);
+				}
+			}
+		}
+
+		// Add jobs
+		List<XmlDoc.Element> jes = pe.elements("sync/job");
+		if (jes != null) {
+			for (XmlDoc.Element je : jes) {
+				addJob(Job.parse(je, source, session));
+			}
+		}
+	}
+
+	public void loadFromXmlFile(Path xmlFile, MFSession session) throws Throwable {
+		if (xmlFile != null) {
+			Reader r = new BufferedReader(new FileReader(xmlFile.toFile()));
+			try {
+				XmlDoc.Element pe = new XmlDoc().parse(r);
+				if (pe == null) {
+					throw new IllegalArgumentException("Failed to parse configuration XML file: " + xmlFile);
+				}
+				loadFromXml(pe, xmlFile.toString(), session);
+			} finally {
+				r.close();
+			}
+		}
+	}
+
+	public boolean hasDownloadJobs() {
+		if (_jobs != null && !_jobs.isEmpty()) {
+			for (Job job : _jobs) {
+				if (job.action() == Action.DOWNLOAD) {
+					return true;
+				}
+			}
+		}
+		return false;
+	}
+
+	public boolean hasUploadJobs() {
+		if (_jobs != null && !_jobs.isEmpty()) {
+			for (Job job : _jobs) {
+				if (job.action() == Action.UPLOAD) {
+					return true;
+				}
+			}
+		}
+		return false;
+	}
+
+	/**
+	 * Delete local files.
+	 *
+	 * @return
+	 */
+	public boolean deleteFiles() {
+		return _deleteFiles;
+	}
+
+	public void setDeleteFiles(boolean deleteFiles) {
+		_deleteFiles = deleteFiles;
+	}
+
+	/**
+	 * Destroy remote assets.
+	 *
+	 * @return
+	 */
+	public boolean deleteAssets() {
+		return _deleteAssets;
+	}
+
+	public void setDeleteAssets(boolean deleteAssets) {
+		_deleteAssets = deleteAssets;
+	}
+
+	public boolean hardDestroyAssets() {
+		return _hardDestroyAssets;
+	}
+
+	public void setHardDestroyAssets(boolean hardDestroy) {
+		_hardDestroyAssets = hardDestroy;
+	}
+
+	public boolean needToDeleteFiles() {
+		return (hasDownloadJobs()) && deleteFiles();
+	}
+
+	public boolean needToDeleteAssets() {
+		return (hasUploadJobs()) && deleteAssets();
+	}
+
+	public boolean followSymlinks() {
+		return _followSymlinks;
+	}
+
+	public void setFollowSymlinks(boolean followSymlinks) {
+		_followSymlinks = followSymlinks;
+	}
+
+	/**
+	 * restore symbolic links when downloading
+	 *
+	 * @return
+	 */
+	public boolean restoreSymlinks() {
+		return _restoreSymlinks;
+	}
+
+	/**
+	 * set whether or not to restore symbolic links when downloading.
+	 *
+	 * @param restoreSymlinks
+	 */
+	public void setRestoreSymlinks(boolean restoreSymlinks) {
+		_restoreSymlinks = restoreSymlinks;
+	}
+
+	public int logFileSizeMB() {
+		return _logFileSizeMB;
+	}
+
+	public void setLogFileSizeMB(int fileSizeMB) {
+		if (fileSizeMB < MIN_LOG_FILE_SIZE_MB || fileSizeMB > MAX_LOG_FILE_SIZE_MB) {
+			throw new IllegalArgumentException("Expects integer value between " + MIN_LOG_FILE_SIZE_MB + " and "
+					+ MAX_LOG_FILE_SIZE_MB + ". Found " + fileSizeMB);
+		}
+		_logFileSizeMB = fileSizeMB;
+	}
+
+	public int logFileCount() {
+		return _logFileCount;
+	}
+
+	public void setLogFileCount(int fileCount) {
+		if (fileCount < MIN_LOG_FILE_COUNT || fileCount > MAX_LOG_FILE_COUNT) {
+			throw new IllegalArgumentException("Expects integer value between " + MIN_LOG_FILE_COUNT + " and "
+					+ MAX_LOG_FILE_COUNT + ". Found " + fileCount);
+		}
+		_logFileCount = fileCount;
+	}
+
+	public boolean excludeParent() {
+		return _excludeParent;
+	}
+
+	public void setExcludeParent(boolean excludeParent) {
+		_excludeParent = excludeParent;
+	}
+
+	public boolean worm() {
+		return _worm;
+	}
+
+	public void setWorm(boolean worm) {
+		_worm = worm;
+	}
+
+	public boolean wormCanAddVersions() {
+		return _wormCanAddVersions;
+	}
+
+	public void setWormCanAddVersions(boolean wormCanAddVersions) {
+		_wormCanAddVersions = wormCanAddVersions;
+	}
+
+	public boolean wormCanMove() {
+		return _wormCanMove;
+	}
+
+	public void setWormCanMove(boolean wormCanMove) {
+		_wormCanMove = wormCanMove;
+	}
+
+	public boolean includeMetadata() {
+		return _includeMetadata;
+	}
+
+	public void setIncludeMetadata(boolean includeMetadata) {
+		_includeMetadata = includeMetadata;
+	}
+
+	public long aggregateDownloadThreshold() {
+		return _aggregateDownloadThreshold;
+	}
+
+	public void setAggregateDownloadThreshold(Long aggregateDownloadThreshold) {
+		_aggregateDownloadThreshold = aggregateDownloadThreshold;
+	}
+
+	public void enableAggregateDownload() {
+		setAggregateDownloadThreshold(DEFAULT_AGGREGATE_DOWNLOAD_THRESHOLD);
+	}
+
+	public void disableAggregateDownload() {
+		setAggregateDownloadThreshold(0L);
+	}
+
+	public long aggregateUploadThreshold() {
+		return _aggregateUploadThreshold;
+	}
+
+	public void setAggregateUploadThreshold(Long aggregateUploadThreshold) {
+		_aggregateUploadThreshold = aggregateUploadThreshold;
+	}
+
+	public void enableAggregateUpload() {
+		setAggregateUploadThreshold(DEFAULT_AGGREGATE_UPLOAD_THRESHOLD);
+	}
+
+	public void disableAggregateUpload() {
+		setAggregateUploadThreshold(0L);
+	}
+
+	public long splitThreshold() {
+		return _splitThreshold;
+	}
+
+	public void setSplitThreshold(long splitThreshold) {
+		_splitThreshold = splitThreshold;
+	}
+
+	public void enableSplit() {
+		_splitThreshold = DEFAULT_SPLIT_THRESHOLD;
+	}
+
+	public void disableSplit() {
+		_splitThreshold = 0;
+	}
+
+}
diff --git a/src/main/scripts/windows/facility/facility-download-shell-script-url-create.cmd b/src/main/scripts/windows/facility/facility-download-shell-script-url-create.cmd
index 8d3e515ee2a8c56a4b2814a7ec5c3b7378ca546d..f8724f850ba9f143fa07d8b2c1928616fb63e722 100755
--- a/src/main/scripts/windows/facility/facility-download-shell-script-url-create.cmd
+++ b/src/main/scripts/windows/facility/facility-download-shell-script-url-create.cmd
@@ -1,195 +1,195 @@
-@echo off
-setlocal EnableExtensions EnableDelayedExpansion
-
-REM This is the root namespace for projects to be located in.
-REM The caller specifies only the project ID
-REM Needs to be enhanced to allow for projects in /projects (fails if you change the root to that)
-REM When this script is deployed, you must set the root namespace correctly.
-set NAMESPACE_ROOT=/projects/cryo-em
-
-REM check java
-where java >nul 2>nul
-IF %errorlevel% neq 0 (
-    echo error: no java is found. Install java and retry.
-    exit /b 1
-)
-
-REM script file name
-set PROG=%~0
-
-REM aterm.jar download url
-set ATERM_URL=https://mediaflux.researchsoftware.unimelb.edu.au/mflux/aterm.jar
-
-REM aterm.jar location
-if [%MFLUX_ATERM%]==[] set "MFLUX_ATERM=%~dp0..\..\..\lib\aterm.jar"
-if not exist "%MFLUX_ATERM%" (
-    set "MFLUX_ATERM=%USERPROFILE%\.Arcitecta\aterm.jar"
-    if not exist %USERPROFILE%\.Arcitecta\NUL mkdir "%USERPROFILE%\.Arcitecta"
-    call :download !ATERM_URL! !MFLUX_ATERM!
-    if !errorlevel! neq 0 exit /b 1
-)
-
-REM mflux.cfg location
-if [%MFLUX_CFG%]==[] set "MFLUX_CFG=%USERPROFILE%\.Arcitecta\mflux.cfg" 
-if not exist "%MFLUX_CFG%" set "MFLUX_CFG=%~dp0..\..\..\config\mflux.cfg"
-if not exist "%MFLUX_CFG%" set "MFLUX_CFG=%~dp0..\..\..\mflux.cfg"
-if not exist "%MFLUX_CFG%" set "MFLUX_CFG=%~dp0mflux.cfg"
-if not exist "%MFLUX_CFG%" (
-    set "MFLUX_CFG=%USERPROFILE%\.Arcitecta\mflux.cfg"
-    echo error: could not find %MFLUX_CFG%
-    exit /b 1
-)
-
-REM aterm command prefix
-set "ATERM=java -jar -Dmf.cfg=%MFLUX_CFG% %MFLUX_ATERM% nogui"
-
-REM service name
-set "SERVICE=unimelb.asset.download.shell.script.url.create"
-
-REM command (prefix)
-set "COMMAND=%ATERM% %SERVICE%"
-
-REM default argument values
-set EXPIRE_DAYS=14
-set OVERWRITE=false
-set VERBOSE=true
-
-REM 
-set PROJECT=
-set ROLE=
-set NAMESPACES=
-set EMAILS=
-
-REM parse arguments
-:loop
-if "%~1" NEQ "" (
-    if "%~1"=="--expire-days" (
-        set EXPIRE_DAYS=%~2
-        shift
-        shift
-        goto :loop
-    )
-    if "%~1"=="--overwrite" (
-        set OVERWRITE=true
-        shift
-        goto :loop
-    )
-    if "%~1"=="--quiet" (
-        set VERBOSE=false
-        shift
-        goto :loop
-    )
-    if "%~1"=="--email" (
-        set "value=%~2"
-        set value=!value:,= !
-        for %%e in (!value!) do (
-            if "!EMAILS!"=="" ( set "EMAILS=:to %%e" ) else ( set "EMAILS=!EMAILS! :to %%e" )
-        )
-        shift
-        shift
-        goto :loop
-    )
-    if "%~1"=="-h" (
-        call :usage
-        exit /b 0
-    )
-    if "%~1"=="--help" (
-        call :usage %PROG%
-        exit /b 0
-    )
-
-
-    set ns=%~1
-    if "!ns:~0,18!"=="!NAMESPACE_ROOT!/" (
-        for /f "delims=/" %%c in ("!ns:~18!") do (
-            set "prj=%%c"
-        )
-    ) else (
-        for /f "delims=/" %%c in ("!ns!") do (
-            set "prj=%%c"
-        )
-        set "ns=!NAMESPACE_ROOT!/!ns!"
-    )
-
-    if [!PROJECT!]==[] (
-        set PROJECT=!prj!
-        set "ROLE=!prj!:participant-a"
-    ) else (
-        if not "!PROJECT!"=="!prj!" (
-            echo error: cannot share namespaces from multiple projects.
-            exit /b 1
-        )
-    )
-    if "!NAMESPACES!" EQU "" (
-        set "NAMESPACES=:namespace ^"!ns!^""
-    ) else (
-        set "NAMESPACES=!NAMESPACES! :namespace ^"!ns!^""
-    )
-    shift
-    goto :loop
-)
-
-REM check if namespace is specified
-if "!NAMESPACES!"=="" (
-    echo error: no namespace is specified.
-    call :usage
-    exit /b 1
-)
-REM compose the command 
-set "COMMAND=%COMMAND% :download ^< %NAMESPACES% :token ^< :role -type role %ROLE% :to now+%EXPIRE_DAYS%day ^> :verbose %VERBOSE% :overwrite %OVERWRITE% ^> :token ^< :perm ^< :resource -type role:namespace %PROJECT%: :access ADMINISTER ^> ^>"
-if not "%EMAILS%"=="" (
-    set "COMMAND=%COMMAND% :email ^< %EMAILS% ^>"
-)
-
-REM execute aterm command to generate the script url
-%COMMAND%
-
-if %errorlevel% neq 0 (
-    exit /b 2
-)
-exit /b 0
-
-REM function to print usage
-:usage
-echo=
-echo Usage:
-echo     %PROG% [-h^|--help] [--expire-days ^<number-of-days^>] [--ncsr ^<ncsr^>] [--overwrite] [--quiet] [--email ^<addresses^>] ^<namespace^>
-echo=
-echo Options:
-echo     -h ^| --help                       prints usage
-echo     --email ^<addresses^>               specify the email recipient of the generated url. Can be comma-separated if there are more than one. NOTE: your need quote the comma separated values like --email "user1@domain1.org,user2@domain2.org"
-echo     --expire-days ^<number-of-days^>    expiry of the auth token. Defaults to ${EXPIRE_DAYS} days.
-echo     --overwrite                       overwrite if output file exists.
-echo     --quiet                           do not print output message.
-echo=
-echo Positional arguments:
-echo     ^<namespace^>                       Mediaflux asset namespace to be downloaded by the scripts. Can be multiple, but must be from the same project.
-echo=
-echo Examples:
-echo     %PROG% --email user1@unimelb.edu.au --expire-days 10 proj-abc-1128.4.999/RAW_DATA proj-abc-1128.4.999/PROCESSED_DATA
-echo=
-exit /b 0
-
-REM function to download from url
-:download
-setlocal
-set url=%~1
-set out=%~2
-if not exist %out% (
-    where powershell >nul 2>nul
-    if %errorlevel% equ 0 (
-        echo downloading %out%
-        powershell -command "(New-Object Net.WebClient).DownloadFile('%url%', '%out%')" >nul 2>nul
-        if %errorlevel% neq 0 (
-            echo error: failed to download %url%
-            exit /b 1
-        )
-        if not exist "%out%" (
-            echo error: failed to download %url% to %out%
-            exit /b 1
-        )
-    ) else (
-        echo error: cannot download %out%. No powershell found.
-        exit /b 1
-    )
-)
+@echo off
+setlocal EnableExtensions EnableDelayedExpansion
+
+REM This is the root namespace for projects to be located in.
+REM The caller specifies only the project ID
+REM Needs to be enhanced to allow for projects in /projects (fails if you change the root to that)
+REM When this script is deployed, you must set the root namespace correctly, and also update the hard-coded substring offsets (18 = length of NAMESPACE_ROOT + 1) used below when parsing namespace arguments.
+set NAMESPACE_ROOT=/projects/cryo-em
+
+REM check java
+where java >nul 2>nul
+IF %errorlevel% neq 0 (
+    echo error: no java is found. Install java and retry.
+    exit /b 1
+)
+
+REM script file name
+set PROG=%~0
+
+REM aterm.jar download url
+set ATERM_URL=https://mediaflux.researchsoftware.unimelb.edu.au/mflux/aterm.jar
+
+REM aterm.jar location
+if [%MFLUX_ATERM%]==[] set "MFLUX_ATERM=%~dp0..\..\..\lib\aterm.jar"
+if not exist "%MFLUX_ATERM%" (
+    set "MFLUX_ATERM=%USERPROFILE%\.Arcitecta\aterm.jar"
+    if not exist %USERPROFILE%\.Arcitecta\NUL mkdir "%USERPROFILE%\.Arcitecta"
+    call :download !ATERM_URL! !MFLUX_ATERM!
+    if !errorlevel! neq 0 exit /b 1
+)
+
+REM mflux.cfg location
+if [%MFLUX_CFG%]==[] set "MFLUX_CFG=%USERPROFILE%\.Arcitecta\mflux.cfg" 
+if not exist "%MFLUX_CFG%" set "MFLUX_CFG=%~dp0..\..\..\config\mflux.cfg"
+if not exist "%MFLUX_CFG%" set "MFLUX_CFG=%~dp0..\..\..\mflux.cfg"
+if not exist "%MFLUX_CFG%" set "MFLUX_CFG=%~dp0mflux.cfg"
+if not exist "%MFLUX_CFG%" (
+    set "MFLUX_CFG=%USERPROFILE%\.Arcitecta\mflux.cfg"
+    echo error: could not find !MFLUX_CFG!
+    exit /b 1
+)
+
+REM aterm command prefix
+set "ATERM=java -jar -Dmf.cfg=%MFLUX_CFG% %MFLUX_ATERM% nogui"
+
+REM service name
+set "SERVICE=unimelb.asset.download.shell.script.url.create"
+
+REM command (prefix)
+set "COMMAND=%ATERM% %SERVICE%"
+
+REM default argument values
+set EXPIRE_DAYS=14
+set OVERWRITE=false
+set VERBOSE=true
+
+REM 
+set PROJECT=
+set ROLE=
+set NAMESPACES=
+set EMAILS=
+
+REM parse arguments
+:loop
+if "%~1" NEQ "" (
+    if "%~1"=="--expire-days" (
+        set EXPIRE_DAYS=%~2
+        shift
+        shift
+        goto :loop
+    )
+    if "%~1"=="--overwrite" (
+        set OVERWRITE=true
+        shift
+        goto :loop
+    )
+    if "%~1"=="--quiet" (
+        set VERBOSE=false
+        shift
+        goto :loop
+    )
+    if "%~1"=="--email" (
+        set "value=%~2"
+        set value=!value:,= !
+        for %%e in (!value!) do (
+            if "!EMAILS!"=="" ( set "EMAILS=:to %%e" ) else ( set "EMAILS=!EMAILS! :to %%e" )
+        )
+        shift
+        shift
+        goto :loop
+    )
+    if "%~1"=="-h" (
+        call :usage
+        exit /b 0
+    )
+    if "%~1"=="--help" (
+        call :usage %PROG%
+        exit /b 0
+    )
+
+
+    set ns=%~1
+    if "!ns:~0,18!"=="!NAMESPACE_ROOT!/" (
+        for /f "delims=/" %%c in ("!ns:~18!") do (
+            set "prj=%%c"
+        )
+    ) else (
+        for /f "delims=/" %%c in ("!ns!") do (
+            set "prj=%%c"
+        )
+        set "ns=!NAMESPACE_ROOT!/!ns!"
+    )
+
+    if [!PROJECT!]==[] (
+        set PROJECT=!prj!
+        set "ROLE=!prj!:participant-a"
+    ) else (
+        if not "!PROJECT!"=="!prj!" (
+            echo error: cannot share namespaces from multiple projects.
+            exit /b 1
+        )
+    )
+    if "!NAMESPACES!" EQU "" (
+        set "NAMESPACES=:namespace ^"!ns!^""
+    ) else (
+        set "NAMESPACES=!NAMESPACES! :namespace ^"!ns!^""
+    )
+    shift
+    goto :loop
+)
+
+REM check if namespace is specified
+if "!NAMESPACES!"=="" (
+    echo error: no namespace is specified.
+    call :usage
+    exit /b 1
+)
+REM compose the command 
+set "COMMAND=%COMMAND% :download ^< %NAMESPACES% :token ^< :role -type role %ROLE% :to now+%EXPIRE_DAYS%day ^> :verbose %VERBOSE% :overwrite %OVERWRITE% ^> :token ^< :perm ^< :resource -type role:namespace %PROJECT%: :access ADMINISTER ^> ^>"
+if not "%EMAILS%"=="" (
+    set "COMMAND=%COMMAND% :email ^< %EMAILS% ^>"
+)
+
+REM execute aterm command to generate the script url
+%COMMAND%
+
+if %errorlevel% neq 0 (
+    exit /b 2
+)
+exit /b 0
+
+REM function to print usage
+:usage
+echo=
+echo Usage:
+echo     %PROG% [-h^|--help] [--expire-days ^<number-of-days^>] [--overwrite] [--quiet] [--email ^<addresses^>] ^<namespace^>
+echo=
+echo Options:
+echo     -h ^| --help                       prints usage
+echo     --email ^<addresses^>               specify the email recipient of the generated url. Can be comma-separated if there are more than one. NOTE: you need to quote comma-separated values like --email "user1@domain1.org,user2@domain2.org"
+echo     --expire-days ^<number-of-days^>    expiry of the auth token. Defaults to %EXPIRE_DAYS% days.
+echo     --overwrite                       overwrite if output file exists.
+echo     --quiet                           do not print output message.
+echo=
+echo Positional arguments:
+echo     ^<namespace^>                       Mediaflux asset namespace to be downloaded by the scripts. Can be multiple, but must be from the same project.
+echo=
+echo Examples:
+echo     %PROG% --email user1@unimelb.edu.au --expire-days 10 proj-abc-1128.4.999/RAW_DATA proj-abc-1128.4.999/PROCESSED_DATA
+echo=
+exit /b 0
+
+REM function to download from url
+:download
+setlocal
+set url=%~1
+set out=%~2
+if not exist "%out%" (
+    where powershell >nul 2>nul
+    if !errorlevel! equ 0 (
+        echo downloading %out%
+        powershell -command "(New-Object Net.WebClient).DownloadFile('%url%', '%out%')" >nul 2>nul
+        if !errorlevel! neq 0 (
+            echo error: failed to download %url%
+            exit /b 1
+        )
+        if not exist "%out%" (
+            echo error: failed to download %url% to %out%
+            exit /b 1
+        )
+    ) else (
+        echo error: cannot download %out%. No powershell found.
+        exit /b 1
+    )
+)
diff --git a/src/main/scripts/windows/unimelb-mf-import-archive.cmd b/src/main/scripts/windows/unimelb-mf-import-archive.cmd
index ecb93e4e4935bb899dbc9c3136b4b1cb494f3259..8fadaf1fe70d2113bc73c823243a3cf2a5a0baef 100644
--- a/src/main/scripts/windows/unimelb-mf-import-archive.cmd
+++ b/src/main/scripts/windows/unimelb-mf-import-archive.cmd
@@ -1,12 +1,12 @@
-@echo off
-
-pushd %~dp0..\..\
-set ROOT=%cd%
-popd
-
-@REM set JAVA_HOME=%ROOT%\@JAVA_HOME@
-@REM set PATH=%JAVA_HOME%\bin;%PATH%
-
-set JAR=%ROOT%\lib\unimelb-mf-clients.jar
-
-java -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+UseStringDeduplication -Xmx1g -cp "%JAR%" unimelb.mf.client.sync.cli.MFImportArchive %*
+@echo off
+
+pushd %~dp0..\..\
+set ROOT=%cd%
+popd
+
+@REM set JAVA_HOME=%ROOT%\@JAVA_HOME@
+@REM set PATH=%JAVA_HOME%\bin;%PATH%
+
+set JAR=%ROOT%\lib\unimelb-mf-clients.jar
+
+java -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+UseStringDeduplication -Xmx1g -cp "%JAR%" unimelb.mf.client.sync.cli.MFImportArchive %*