deleted all actual file
diff --git a/genomix/genomix-hadoop/actual1/conf.xml b/genomix/genomix-hadoop/actual1/conf.xml
deleted file mode 100644
index ea51bd0..0000000
--- a/genomix/genomix-hadoop/actual1/conf.xml
+++ /dev/null
@@ -1,179 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
-<property><name>fs.s3n.impl</name><value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value></property>
-<property><name>mapred.task.cache.levels</name><value>2</value></property>
-<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value></property>
-<property><name>hadoop.native.lib</name><value>true</value></property>
-<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value></property>
-<property><name>dfs.namenode.decommission.nodes.per.interval</name><value>5</value></property>
-<property><name>dfs.https.need.client.auth</name><value>false</value></property>
-<property><name>ipc.client.idlethreshold</name><value>4000</value></property>
-<property><name>mapred.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.hours</name><value>0</value></property>
-<property><name>dfs.namenode.logging.level</name><value>info</value></property>
-<property><name>dfs.datanode.address</name><value>127.0.0.1:0</value></property>
-<property><name>io.skip.checksum.errors</name><value>false</value></property>
-<property><name>fs.default.name</name><value>hdfs://localhost:54837</value></property>
-<property><name>mapred.child.tmp</name><value>./tmp</value></property>
-<property><name>fs.har.impl.disable.cache</name><value>true</value></property>
-<property><name>dfs.safemode.threshold.pct</name><value>0.999f</value></property>
-<property><name>mapred.skip.reduce.max.skip.groups</name><value>0</value></property>
-<property><name>dfs.namenode.handler.count</name><value>10</value></property>
-<property><name>dfs.blockreport.initialDelay</name><value>0</value></property>
-<property><name>mapred.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value></property>
-<property><name>mapred.tasktracker.dns.nameserver</name><value>default</value></property>
-<property><name>io.sort.factor</name><value>10</value></property>
-<property><name>mapred.task.timeout</name><value>600000</value></property>
-<property><name>mapred.max.tracker.failures</name><value>4</value></property>
-<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value></property>
-<property><name>fs.hdfs.impl</name><value>org.apache.hadoop.hdfs.DistributedFileSystem</value></property>
-<property><name>mapred.queue.default.acl-administer-jobs</name><value>*</value></property>
-<property><name>mapred.queue.default.acl-submit-job</name><value>*</value></property>
-<property><name>mapred.skip.map.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.mapfile.bloom.size</name><value>1048576</value></property>
-<property><name>dfs.safemode.extension</name><value>0</value></property>
-<property><name>tasktracker.http.threads</name><value>40</value></property>
-<property><name>mapred.job.shuffle.merge.percent</name><value>0.66</value></property>
-<property><name>fs.ftp.impl</name><value>org.apache.hadoop.fs.ftp.FTPFileSystem</value></property>
-<property><name>mapred.output.compress</name><value>false</value></property>
-<property><name>io.bytes.per.checksum</name><value>512</value></property>
-<property><name>topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.StaticMapping</value></property>
-<property><name>dfs.https.server.keystore.resource</name><value>ssl-server.xml</value></property>
-<property><name>mapred.reduce.slowstart.completed.maps</name><value>0.05</value></property>
-<property><name>mapred.reduce.max.attempts</name><value>4</value></property>
-<property><name>fs.ramfs.impl</name><value>org.apache.hadoop.fs.InMemoryFileSystem</value></property>
-<property><name>dfs.name.edits.dir</name><value>${dfs.name.dir}</value></property>
-<property><name>mapred.skip.map.max.skip.records</name><value>0</value></property>
-<property><name>hadoop.job.ugi</name><value>hadoop,staff,everyone,localaccounts</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value></property>
-<property><name>dfs.block.size</name><value>67108864</value></property>
-<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value></property>
-<property><name>job.end.retry.attempts</name><value>0</value></property>
-<property><name>fs.file.impl</name><value>org.apache.hadoop.fs.LocalFileSystem</value></property>
-<property><name>mapred.local.dir.minspacestart</name><value>0</value></property>
-<property><name>mapred.output.compression.type</name><value>RECORD</value></property>
-<property><name>dfs.datanode.ipc.address</name><value>127.0.0.1:0</value></property>
-<property><name>dfs.permissions</name><value>true</value></property>
-<property><name>topology.script.number.args</name><value>100</value></property>
-<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value></property>
-<property><name>mapred.max.tracker.blacklists</name><value>4</value></property>
-<property><name>mapred.task.profile.maps</name><value>0-2</value></property>
-<property><name>dfs.datanode.https.address</name><value>0.0.0.0:50475</value></property>
-<property><name>mapred.userlog.retain.hours</name><value>24</value></property>
-<property><name>dfs.secondary.http.address</name><value>0.0.0.0:50090</value></property>
-<property><name>dfs.replication.max</name><value>512</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.active</name><value>false</value></property>
-<property><name>hadoop.security.authorization</name><value>false</value></property>
-<property><name>local.cache.size</name><value>10737418240</value></property>
-<property><name>mapred.min.split.size</name><value>0</value></property>
-<property><name>mapred.map.tasks</name><value>2</value></property>
-<property><name>mapred.child.java.opts</name><value>-Xmx200m</value></property>
-<property><name>dfs.https.client.keystore.resource</name><value>ssl-client.xml</value></property>
-<property><name>dfs.namenode.startup</name><value>REGULAR</value></property>
-<property><name>mapred.job.queue.name</name><value>default</value></property>
-<property><name>dfs.https.address</name><value>0.0.0.0:50470</value></property>
-<property><name>dfs.balance.bandwidthPerSec</name><value>1048576</value></property>
-<property><name>ipc.server.listen.queue.size</name><value>128</value></property>
-<property><name>mapred.inmem.merge.threshold</name><value>1000</value></property>
-<property><name>job.end.retry.interval</name><value>30000</value></property>
-<property><name>mapred.skip.attempts.to.start.skipping</name><value>2</value></property>
-<property><name>fs.checkpoint.dir</name><value>build/test/data/dfs/namesecondary1,build/test/data/dfs/namesecondary2</value></property>
-<property><name>mapred.reduce.tasks</name><value>1</value></property>
-<property><name>mapred.merge.recordsBeforeProgress</name><value>10000</value></property>
-<property><name>mapred.userlog.limit.kb</name><value>0</value></property>
-<property><name>dfs.max.objects</name><value>0</value></property>
-<property><name>webinterface.private.actions</name><value>false</value></property>
-<property><name>io.sort.spill.percent</name><value>0.80</value></property>
-<property><name>mapred.job.shuffle.input.buffer.percent</name><value>0.70</value></property>
-<property><name>dfs.datanode.dns.nameserver</name><value>default</value></property>
-<property><name>mapred.map.tasks.speculative.execution</name><value>true</value></property>
-<property><name>hadoop.util.hash.type</name><value>murmur</value></property>
-<property><name>dfs.blockreport.intervalMsec</name><value>3600000</value></property>
-<property><name>mapred.map.max.attempts</name><value>4</value></property>
-<property><name>dfs.client.block.write.retries</name><value>3</value></property>
-<property><name>mapred.job.tracker.handler.count</name><value>10</value></property>
-<property><name>mapred.tasktracker.expiry.interval</name><value>600000</value></property>
-<property><name>dfs.https.enable</name><value>false</value></property>
-<property><name>mapred.jobtracker.maxtasks.per.job</name><value>-1</value></property>
-<property><name>mapred.jobtracker.job.history.block.size</name><value>3145728</value></property>
-<property><name>keep.failed.task.files</name><value>false</value></property>
-<property><name>ipc.client.tcpnodelay</name><value>false</value></property>
-<property><name>mapred.task.profile.reduces</name><value>0-2</value></property>
-<property><name>mapred.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>io.map.index.skip</name><value>0</value></property>
-<property><name>ipc.server.tcpnodelay</name><value>false</value></property>
-<property><name>dfs.default.chunk.view.size</name><value>32768</value></property>
-<property><name>hadoop.logfile.size</name><value>10000000</value></property>
-<property><name>mapred.reduce.tasks.speculative.execution</name><value>true</value></property>
-<property><name>dfs.datanode.du.reserved</name><value>0</value></property>
-<property><name>fs.checkpoint.period</name><value>3600</value></property>
-<property><name>dfs.web.ugi</name><value>webuser,webgroup</value></property>
-<property><name>mapred.job.reuse.jvm.num.tasks</name><value>1</value></property>
-<property><name>mapred.jobtracker.completeuserjobs.maximum</name><value>100</value></property>
-<property><name>dfs.df.interval</name><value>60000</value></property>
-<property><name>dfs.data.dir</name><value>${hadoop.tmp.dir}/dfs/data</value></property>
-<property><name>fs.s3.maxRetries</name><value>4</value></property>
-<property><name>dfs.datanode.dns.interface</name><value>default</value></property>
-<property><name>dfs.support.append</name><value>false</value></property>
-<property><name>dfs.permissions.supergroup</name><value>supergroup</value></property>
-<property><name>mapred.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value></property>
-<property><name>fs.hftp.impl</name><value>org.apache.hadoop.hdfs.HftpFileSystem</value></property>
-<property><name>fs.trash.interval</name><value>0</value></property>
-<property><name>fs.s3.sleepTimeSeconds</name><value>10</value></property>
-<property><name>dfs.replication.min</name><value>1</value></property>
-<property><name>mapred.submit.replication</name><value>10</value></property>
-<property><name>fs.har.impl</name><value>org.apache.hadoop.fs.HarFileSystem</value></property>
-<property><name>mapred.map.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>mapred.tasktracker.dns.interface</name><value>default</value></property>
-<property><name>dfs.namenode.decommission.interval</name><value>3</value></property>
-<property><name>dfs.http.address</name><value>localhost:54838</value></property>
-<property><name>dfs.heartbeat.interval</name><value>3</value></property>
-<property><name>mapred.job.tracker</name><value>local</value></property>
-<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
-<property><name>dfs.name.dir</name><value>build/test/data/dfs/name1,build/test/data/dfs/name2</value></property>
-<property><name>mapred.line.input.format.linespermap</name><value>1</value></property>
-<property><name>mapred.jobtracker.taskScheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value></property>
-<property><name>mapred.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value></property>
-<property><name>dfs.datanode.http.address</name><value>127.0.0.1:0</value></property>
-<property><name>mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</name><value>5000</value></property>
-<property><name>mapred.local.dir.minspacekill</name><value>0</value></property>
-<property><name>dfs.replication.interval</name><value>3</value></property>
-<property><name>io.sort.record.percent</name><value>0.05</value></property>
-<property><name>fs.kfs.impl</name><value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value></property>
-<property><name>mapred.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value></property>
-<property><name>mapred.tasktracker.reduce.tasks.maximum</name><value>2</value></property>
-<property><name>dfs.replication</name><value>2</value></property>
-<property><name>fs.checkpoint.edits.dir</name><value>${fs.checkpoint.dir}</value></property>
-<property><name>mapred.job.reduce.input.buffer.percent</name><value>0.0</value></property>
-<property><name>mapred.tasktracker.indexcache.mb</name><value>10</value></property>
-<property><name>hadoop.logfile.count</name><value>10</value></property>
-<property><name>mapred.skip.reduce.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.seqfile.compress.blocksize</name><value>1000000</value></property>
-<property><name>fs.s3.block.size</name><value>67108864</value></property>
-<property><name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name><value>5000</value></property>
-<property><name>mapred.acls.enabled</name><value>false</value></property>
-<property><name>mapred.queue.names</name><value>default</value></property>
-<property><name>dfs.access.time.precision</name><value>3600000</value></property>
-<property><name>fs.hsftp.impl</name><value>org.apache.hadoop.hdfs.HsftpFileSystem</value></property>
-<property><name>mapred.task.tracker.http.address</name><value>0.0.0.0:50060</value></property>
-<property><name>mapred.reduce.parallel.copies</name><value>5</value></property>
-<property><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><name>io.sort.mb</name><value>100</value></property>
-<property><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
-<property><name>mapred.compress.map.output</name><value>false</value></property>
-<property><name>mapred.task.tracker.report.address</name><value>127.0.0.1:0</value></property>
-<property><name>ipc.client.kill.max</name><value>10</value></property>
-<property><name>ipc.client.connect.max.retries</name><value>10</value></property>
-<property><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
-<property><name>mapred.job.tracker.http.address</name><value>0.0.0.0:50030</value></property>
-<property><name>io.file.buffer.size</name><value>4096</value></property>
-<property><name>mapred.jobtracker.restart.recover</name><value>false</value></property>
-<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization</value></property>
-<property><name>dfs.datanode.handler.count</name><value>3</value></property>
-<property><name>mapred.reduce.copy.backoff</name><value>300</value></property>
-<property><name>mapred.task.profile</name><value>false</value></property>
-<property><name>dfs.replication.considerLoad</name><value>true</value></property>
-<property><name>jobclient.output.filter</name><value>FAILED</value></property>
-<property><name>mapred.tasktracker.map.tasks.maximum</name><value>2</value></property>
-<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value></property>
-<property><name>fs.checkpoint.size</name><value>67108864</value></property>
-</configuration>
\ No newline at end of file
diff --git a/genomix/genomix-hadoop/actual1/result1/.part-00000.crc b/genomix/genomix-hadoop/actual1/result1/.part-00000.crc
deleted file mode 100644
index 93d0276..0000000
--- a/genomix/genomix-hadoop/actual1/result1/.part-00000.crc
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual1/result1/part-00000 b/genomix/genomix-hadoop/actual1/result1/part-00000
deleted file mode 100755
index 57a1fd6..0000000
--- a/genomix/genomix-hadoop/actual1/result1/part-00000
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual2/conf.xml b/genomix/genomix-hadoop/actual2/conf.xml
deleted file mode 100644
index e8ae529..0000000
--- a/genomix/genomix-hadoop/actual2/conf.xml
+++ /dev/null
@@ -1,179 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
-<property><name>fs.s3n.impl</name><value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value></property>
-<property><name>mapred.task.cache.levels</name><value>2</value></property>
-<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value></property>
-<property><name>hadoop.native.lib</name><value>true</value></property>
-<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value></property>
-<property><name>dfs.namenode.decommission.nodes.per.interval</name><value>5</value></property>
-<property><name>dfs.https.need.client.auth</name><value>false</value></property>
-<property><name>ipc.client.idlethreshold</name><value>4000</value></property>
-<property><name>mapred.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.hours</name><value>0</value></property>
-<property><name>dfs.namenode.logging.level</name><value>info</value></property>
-<property><name>dfs.datanode.address</name><value>127.0.0.1:0</value></property>
-<property><name>io.skip.checksum.errors</name><value>false</value></property>
-<property><name>fs.default.name</name><value>hdfs://localhost:54877</value></property>
-<property><name>mapred.child.tmp</name><value>./tmp</value></property>
-<property><name>fs.har.impl.disable.cache</name><value>true</value></property>
-<property><name>dfs.safemode.threshold.pct</name><value>0.999f</value></property>
-<property><name>mapred.skip.reduce.max.skip.groups</name><value>0</value></property>
-<property><name>dfs.namenode.handler.count</name><value>10</value></property>
-<property><name>dfs.blockreport.initialDelay</name><value>0</value></property>
-<property><name>mapred.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value></property>
-<property><name>mapred.tasktracker.dns.nameserver</name><value>default</value></property>
-<property><name>io.sort.factor</name><value>10</value></property>
-<property><name>mapred.task.timeout</name><value>600000</value></property>
-<property><name>mapred.max.tracker.failures</name><value>4</value></property>
-<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value></property>
-<property><name>fs.hdfs.impl</name><value>org.apache.hadoop.hdfs.DistributedFileSystem</value></property>
-<property><name>mapred.queue.default.acl-administer-jobs</name><value>*</value></property>
-<property><name>mapred.queue.default.acl-submit-job</name><value>*</value></property>
-<property><name>mapred.skip.map.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.mapfile.bloom.size</name><value>1048576</value></property>
-<property><name>dfs.safemode.extension</name><value>0</value></property>
-<property><name>tasktracker.http.threads</name><value>40</value></property>
-<property><name>mapred.job.shuffle.merge.percent</name><value>0.66</value></property>
-<property><name>fs.ftp.impl</name><value>org.apache.hadoop.fs.ftp.FTPFileSystem</value></property>
-<property><name>mapred.output.compress</name><value>false</value></property>
-<property><name>io.bytes.per.checksum</name><value>512</value></property>
-<property><name>topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.StaticMapping</value></property>
-<property><name>dfs.https.server.keystore.resource</name><value>ssl-server.xml</value></property>
-<property><name>mapred.reduce.slowstart.completed.maps</name><value>0.05</value></property>
-<property><name>mapred.reduce.max.attempts</name><value>4</value></property>
-<property><name>fs.ramfs.impl</name><value>org.apache.hadoop.fs.InMemoryFileSystem</value></property>
-<property><name>dfs.name.edits.dir</name><value>${dfs.name.dir}</value></property>
-<property><name>mapred.skip.map.max.skip.records</name><value>0</value></property>
-<property><name>hadoop.job.ugi</name><value>hadoop,staff,everyone,localaccounts</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value></property>
-<property><name>dfs.block.size</name><value>67108864</value></property>
-<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value></property>
-<property><name>job.end.retry.attempts</name><value>0</value></property>
-<property><name>fs.file.impl</name><value>org.apache.hadoop.fs.LocalFileSystem</value></property>
-<property><name>mapred.local.dir.minspacestart</name><value>0</value></property>
-<property><name>mapred.output.compression.type</name><value>RECORD</value></property>
-<property><name>dfs.datanode.ipc.address</name><value>127.0.0.1:0</value></property>
-<property><name>dfs.permissions</name><value>true</value></property>
-<property><name>topology.script.number.args</name><value>100</value></property>
-<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value></property>
-<property><name>mapred.max.tracker.blacklists</name><value>4</value></property>
-<property><name>mapred.task.profile.maps</name><value>0-2</value></property>
-<property><name>dfs.datanode.https.address</name><value>0.0.0.0:50475</value></property>
-<property><name>mapred.userlog.retain.hours</name><value>24</value></property>
-<property><name>dfs.secondary.http.address</name><value>0.0.0.0:50090</value></property>
-<property><name>dfs.replication.max</name><value>512</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.active</name><value>false</value></property>
-<property><name>hadoop.security.authorization</name><value>false</value></property>
-<property><name>local.cache.size</name><value>10737418240</value></property>
-<property><name>mapred.min.split.size</name><value>0</value></property>
-<property><name>mapred.map.tasks</name><value>2</value></property>
-<property><name>mapred.child.java.opts</name><value>-Xmx200m</value></property>
-<property><name>dfs.https.client.keystore.resource</name><value>ssl-client.xml</value></property>
-<property><name>dfs.namenode.startup</name><value>REGULAR</value></property>
-<property><name>mapred.job.queue.name</name><value>default</value></property>
-<property><name>dfs.https.address</name><value>0.0.0.0:50470</value></property>
-<property><name>dfs.balance.bandwidthPerSec</name><value>1048576</value></property>
-<property><name>ipc.server.listen.queue.size</name><value>128</value></property>
-<property><name>mapred.inmem.merge.threshold</name><value>1000</value></property>
-<property><name>job.end.retry.interval</name><value>30000</value></property>
-<property><name>mapred.skip.attempts.to.start.skipping</name><value>2</value></property>
-<property><name>fs.checkpoint.dir</name><value>build/test/data/dfs/namesecondary1,build/test/data/dfs/namesecondary2</value></property>
-<property><name>mapred.reduce.tasks</name><value>1</value></property>
-<property><name>mapred.merge.recordsBeforeProgress</name><value>10000</value></property>
-<property><name>mapred.userlog.limit.kb</name><value>0</value></property>
-<property><name>dfs.max.objects</name><value>0</value></property>
-<property><name>webinterface.private.actions</name><value>false</value></property>
-<property><name>io.sort.spill.percent</name><value>0.80</value></property>
-<property><name>mapred.job.shuffle.input.buffer.percent</name><value>0.70</value></property>
-<property><name>dfs.datanode.dns.nameserver</name><value>default</value></property>
-<property><name>mapred.map.tasks.speculative.execution</name><value>true</value></property>
-<property><name>hadoop.util.hash.type</name><value>murmur</value></property>
-<property><name>dfs.blockreport.intervalMsec</name><value>3600000</value></property>
-<property><name>mapred.map.max.attempts</name><value>4</value></property>
-<property><name>dfs.client.block.write.retries</name><value>3</value></property>
-<property><name>mapred.job.tracker.handler.count</name><value>10</value></property>
-<property><name>mapred.tasktracker.expiry.interval</name><value>600000</value></property>
-<property><name>dfs.https.enable</name><value>false</value></property>
-<property><name>mapred.jobtracker.maxtasks.per.job</name><value>-1</value></property>
-<property><name>mapred.jobtracker.job.history.block.size</name><value>3145728</value></property>
-<property><name>keep.failed.task.files</name><value>false</value></property>
-<property><name>ipc.client.tcpnodelay</name><value>false</value></property>
-<property><name>mapred.task.profile.reduces</name><value>0-2</value></property>
-<property><name>mapred.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>io.map.index.skip</name><value>0</value></property>
-<property><name>ipc.server.tcpnodelay</name><value>false</value></property>
-<property><name>dfs.default.chunk.view.size</name><value>32768</value></property>
-<property><name>hadoop.logfile.size</name><value>10000000</value></property>
-<property><name>mapred.reduce.tasks.speculative.execution</name><value>true</value></property>
-<property><name>dfs.datanode.du.reserved</name><value>0</value></property>
-<property><name>fs.checkpoint.period</name><value>3600</value></property>
-<property><name>dfs.web.ugi</name><value>webuser,webgroup</value></property>
-<property><name>mapred.job.reuse.jvm.num.tasks</name><value>1</value></property>
-<property><name>mapred.jobtracker.completeuserjobs.maximum</name><value>100</value></property>
-<property><name>dfs.df.interval</name><value>60000</value></property>
-<property><name>dfs.data.dir</name><value>${hadoop.tmp.dir}/dfs/data</value></property>
-<property><name>fs.s3.maxRetries</name><value>4</value></property>
-<property><name>dfs.datanode.dns.interface</name><value>default</value></property>
-<property><name>dfs.support.append</name><value>false</value></property>
-<property><name>dfs.permissions.supergroup</name><value>supergroup</value></property>
-<property><name>mapred.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value></property>
-<property><name>fs.hftp.impl</name><value>org.apache.hadoop.hdfs.HftpFileSystem</value></property>
-<property><name>fs.trash.interval</name><value>0</value></property>
-<property><name>fs.s3.sleepTimeSeconds</name><value>10</value></property>
-<property><name>dfs.replication.min</name><value>1</value></property>
-<property><name>mapred.submit.replication</name><value>10</value></property>
-<property><name>fs.har.impl</name><value>org.apache.hadoop.fs.HarFileSystem</value></property>
-<property><name>mapred.map.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>mapred.tasktracker.dns.interface</name><value>default</value></property>
-<property><name>dfs.namenode.decommission.interval</name><value>3</value></property>
-<property><name>dfs.http.address</name><value>localhost:54878</value></property>
-<property><name>dfs.heartbeat.interval</name><value>3</value></property>
-<property><name>mapred.job.tracker</name><value>local</value></property>
-<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
-<property><name>dfs.name.dir</name><value>build/test/data/dfs/name1,build/test/data/dfs/name2</value></property>
-<property><name>mapred.line.input.format.linespermap</name><value>1</value></property>
-<property><name>mapred.jobtracker.taskScheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value></property>
-<property><name>mapred.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value></property>
-<property><name>dfs.datanode.http.address</name><value>127.0.0.1:0</value></property>
-<property><name>mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</name><value>5000</value></property>
-<property><name>mapred.local.dir.minspacekill</name><value>0</value></property>
-<property><name>dfs.replication.interval</name><value>3</value></property>
-<property><name>io.sort.record.percent</name><value>0.05</value></property>
-<property><name>fs.kfs.impl</name><value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value></property>
-<property><name>mapred.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value></property>
-<property><name>mapred.tasktracker.reduce.tasks.maximum</name><value>2</value></property>
-<property><name>dfs.replication</name><value>2</value></property>
-<property><name>fs.checkpoint.edits.dir</name><value>${fs.checkpoint.dir}</value></property>
-<property><name>mapred.job.reduce.input.buffer.percent</name><value>0.0</value></property>
-<property><name>mapred.tasktracker.indexcache.mb</name><value>10</value></property>
-<property><name>hadoop.logfile.count</name><value>10</value></property>
-<property><name>mapred.skip.reduce.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.seqfile.compress.blocksize</name><value>1000000</value></property>
-<property><name>fs.s3.block.size</name><value>67108864</value></property>
-<property><name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name><value>5000</value></property>
-<property><name>mapred.acls.enabled</name><value>false</value></property>
-<property><name>mapred.queue.names</name><value>default</value></property>
-<property><name>dfs.access.time.precision</name><value>3600000</value></property>
-<property><name>fs.hsftp.impl</name><value>org.apache.hadoop.hdfs.HsftpFileSystem</value></property>
-<property><name>mapred.task.tracker.http.address</name><value>0.0.0.0:50060</value></property>
-<property><name>mapred.reduce.parallel.copies</name><value>5</value></property>
-<property><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><name>io.sort.mb</name><value>100</value></property>
-<property><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
-<property><name>mapred.compress.map.output</name><value>false</value></property>
-<property><name>mapred.task.tracker.report.address</name><value>127.0.0.1:0</value></property>
-<property><name>ipc.client.kill.max</name><value>10</value></property>
-<property><name>ipc.client.connect.max.retries</name><value>10</value></property>
-<property><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
-<property><name>mapred.job.tracker.http.address</name><value>0.0.0.0:50030</value></property>
-<property><name>io.file.buffer.size</name><value>4096</value></property>
-<property><name>mapred.jobtracker.restart.recover</name><value>false</value></property>
-<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization</value></property>
-<property><name>dfs.datanode.handler.count</name><value>3</value></property>
-<property><name>mapred.reduce.copy.backoff</name><value>300</value></property>
-<property><name>mapred.task.profile</name><value>false</value></property>
-<property><name>dfs.replication.considerLoad</name><value>true</value></property>
-<property><name>jobclient.output.filter</name><value>FAILED</value></property>
-<property><name>mapred.tasktracker.map.tasks.maximum</name><value>2</value></property>
-<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value></property>
-<property><name>fs.checkpoint.size</name><value>67108864</value></property>
-</configuration>
\ No newline at end of file
diff --git a/genomix/genomix-hadoop/actual2/result2/.part-00000.crc b/genomix/genomix-hadoop/actual2/result2/.part-00000.crc
deleted file mode 100644
index 9019bf8..0000000
--- a/genomix/genomix-hadoop/actual2/result2/.part-00000.crc
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual2/result2/part-00000 b/genomix/genomix-hadoop/actual2/result2/part-00000
deleted file mode 100755
index c72ced3..0000000
--- a/genomix/genomix-hadoop/actual2/result2/part-00000
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual3/complete2/.complete2-r-00000.crc b/genomix/genomix-hadoop/actual3/complete2/.complete2-r-00000.crc
deleted file mode 100644
index bccc1b3..0000000
--- a/genomix/genomix-hadoop/actual3/complete2/.complete2-r-00000.crc
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual3/complete2/complete2-r-00000 b/genomix/genomix-hadoop/actual3/complete2/complete2-r-00000
deleted file mode 100755
index 2704be9..0000000
--- a/genomix/genomix-hadoop/actual3/complete2/complete2-r-00000
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual3/conf.xml b/genomix/genomix-hadoop/actual3/conf.xml
deleted file mode 100644
index 3434825..0000000
--- a/genomix/genomix-hadoop/actual3/conf.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
-<property><name>fs.s3n.impl</name><value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value></property>
-<property><name>mapred.task.cache.levels</name><value>2</value></property>
-<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value></property>
-<property><name>hadoop.native.lib</name><value>true</value></property>
-<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value></property>
-<property><name>dfs.namenode.decommission.nodes.per.interval</name><value>5</value></property>
-<property><name>dfs.https.need.client.auth</name><value>false</value></property>
-<property><name>ipc.client.idlethreshold</name><value>4000</value></property>
-<property><name>mapred.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.hours</name><value>0</value></property>
-<property><name>dfs.namenode.logging.level</name><value>info</value></property>
-<property><name>dfs.datanode.address</name><value>127.0.0.1:0</value></property>
-<property><name>io.skip.checksum.errors</name><value>false</value></property>
-<<<<<<< Updated upstream
-<property><name>fs.default.name</name><value>hdfs://localhost:55383</value></property>
-=======
-<property><name>fs.default.name</name><value>hdfs://localhost:50310</value></property>
->>>>>>> Stashed changes
-<property><name>mapred.child.tmp</name><value>./tmp</value></property>
-<property><name>fs.har.impl.disable.cache</name><value>true</value></property>
-<property><name>dfs.safemode.threshold.pct</name><value>0.999f</value></property>
-<property><name>mapred.skip.reduce.max.skip.groups</name><value>0</value></property>
-<property><name>dfs.namenode.handler.count</name><value>10</value></property>
-<property><name>dfs.blockreport.initialDelay</name><value>0</value></property>
-<property><name>mapred.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value></property>
-<property><name>mapred.tasktracker.dns.nameserver</name><value>default</value></property>
-<property><name>io.sort.factor</name><value>10</value></property>
-<property><name>mapred.task.timeout</name><value>600000</value></property>
-<property><name>mapred.max.tracker.failures</name><value>4</value></property>
-<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value></property>
-<property><name>fs.hdfs.impl</name><value>org.apache.hadoop.hdfs.DistributedFileSystem</value></property>
-<property><name>mapred.queue.default.acl-administer-jobs</name><value>*</value></property>
-<property><name>mapred.queue.default.acl-submit-job</name><value>*</value></property>
-<property><name>mapred.skip.map.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.mapfile.bloom.size</name><value>1048576</value></property>
-<property><name>dfs.safemode.extension</name><value>0</value></property>
-<property><name>tasktracker.http.threads</name><value>40</value></property>
-<property><name>mapred.job.shuffle.merge.percent</name><value>0.66</value></property>
-<property><name>fs.ftp.impl</name><value>org.apache.hadoop.fs.ftp.FTPFileSystem</value></property>
-<property><name>mapred.output.compress</name><value>false</value></property>
-<property><name>io.bytes.per.checksum</name><value>512</value></property>
-<property><name>topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.StaticMapping</value></property>
-<property><name>dfs.https.server.keystore.resource</name><value>ssl-server.xml</value></property>
-<property><name>mapred.reduce.slowstart.completed.maps</name><value>0.05</value></property>
-<property><name>mapred.reduce.max.attempts</name><value>4</value></property>
-<property><name>fs.ramfs.impl</name><value>org.apache.hadoop.fs.InMemoryFileSystem</value></property>
-<property><name>dfs.name.edits.dir</name><value>${dfs.name.dir}</value></property>
-<property><name>mapred.skip.map.max.skip.records</name><value>0</value></property>
-<property><name>hadoop.job.ugi</name><value>hadoop,staff,everyone,localaccounts</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value></property>
-<property><name>dfs.block.size</name><value>67108864</value></property>
-<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value></property>
-<property><name>job.end.retry.attempts</name><value>0</value></property>
-<property><name>fs.file.impl</name><value>org.apache.hadoop.fs.LocalFileSystem</value></property>
-<property><name>mapred.local.dir.minspacestart</name><value>0</value></property>
-<property><name>mapred.output.compression.type</name><value>RECORD</value></property>
-<property><name>dfs.datanode.ipc.address</name><value>127.0.0.1:0</value></property>
-<property><name>dfs.permissions</name><value>true</value></property>
-<property><name>topology.script.number.args</name><value>100</value></property>
-<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value></property>
-<property><name>mapred.max.tracker.blacklists</name><value>4</value></property>
-<property><name>mapred.task.profile.maps</name><value>0-2</value></property>
-<property><name>dfs.datanode.https.address</name><value>0.0.0.0:50475</value></property>
-<property><name>mapred.userlog.retain.hours</name><value>24</value></property>
-<property><name>dfs.secondary.http.address</name><value>0.0.0.0:50090</value></property>
-<property><name>dfs.replication.max</name><value>512</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.active</name><value>false</value></property>
-<property><name>hadoop.security.authorization</name><value>false</value></property>
-<property><name>local.cache.size</name><value>10737418240</value></property>
-<property><name>mapred.min.split.size</name><value>0</value></property>
-<property><name>mapred.map.tasks</name><value>2</value></property>
-<property><name>mapred.child.java.opts</name><value>-Xmx200m</value></property>
-<property><name>dfs.https.client.keystore.resource</name><value>ssl-client.xml</value></property>
-<property><name>dfs.namenode.startup</name><value>REGULAR</value></property>
-<property><name>mapred.job.queue.name</name><value>default</value></property>
-<property><name>dfs.https.address</name><value>0.0.0.0:50470</value></property>
-<property><name>dfs.balance.bandwidthPerSec</name><value>1048576</value></property>
-<property><name>ipc.server.listen.queue.size</name><value>128</value></property>
-<property><name>mapred.inmem.merge.threshold</name><value>1000</value></property>
-<property><name>job.end.retry.interval</name><value>30000</value></property>
-<property><name>mapred.skip.attempts.to.start.skipping</name><value>2</value></property>
-<property><name>fs.checkpoint.dir</name><value>build/test/data/dfs/namesecondary1,build/test/data/dfs/namesecondary2</value></property>
-<property><name>mapred.reduce.tasks</name><value>1</value></property>
-<property><name>mapred.merge.recordsBeforeProgress</name><value>10000</value></property>
-<property><name>mapred.userlog.limit.kb</name><value>0</value></property>
-<property><name>dfs.max.objects</name><value>0</value></property>
-<property><name>webinterface.private.actions</name><value>false</value></property>
-<property><name>io.sort.spill.percent</name><value>0.80</value></property>
-<property><name>mapred.job.shuffle.input.buffer.percent</name><value>0.70</value></property>
-<property><name>dfs.datanode.dns.nameserver</name><value>default</value></property>
-<property><name>mapred.map.tasks.speculative.execution</name><value>true</value></property>
-<property><name>hadoop.util.hash.type</name><value>murmur</value></property>
-<property><name>dfs.blockreport.intervalMsec</name><value>3600000</value></property>
-<property><name>mapred.map.max.attempts</name><value>4</value></property>
-<property><name>dfs.client.block.write.retries</name><value>3</value></property>
-<property><name>mapred.job.tracker.handler.count</name><value>10</value></property>
-<property><name>mapred.tasktracker.expiry.interval</name><value>600000</value></property>
-<property><name>dfs.https.enable</name><value>false</value></property>
-<property><name>mapred.jobtracker.maxtasks.per.job</name><value>-1</value></property>
-<property><name>mapred.jobtracker.job.history.block.size</name><value>3145728</value></property>
-<property><name>keep.failed.task.files</name><value>false</value></property>
-<property><name>ipc.client.tcpnodelay</name><value>false</value></property>
-<property><name>mapred.task.profile.reduces</name><value>0-2</value></property>
-<property><name>mapred.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>io.map.index.skip</name><value>0</value></property>
-<property><name>ipc.server.tcpnodelay</name><value>false</value></property>
-<property><name>dfs.default.chunk.view.size</name><value>32768</value></property>
-<property><name>hadoop.logfile.size</name><value>10000000</value></property>
-<property><name>mapred.reduce.tasks.speculative.execution</name><value>true</value></property>
-<property><name>dfs.datanode.du.reserved</name><value>0</value></property>
-<property><name>fs.checkpoint.period</name><value>3600</value></property>
-<property><name>dfs.web.ugi</name><value>webuser,webgroup</value></property>
-<property><name>mapred.job.reuse.jvm.num.tasks</name><value>1</value></property>
-<property><name>mapred.jobtracker.completeuserjobs.maximum</name><value>100</value></property>
-<property><name>dfs.df.interval</name><value>60000</value></property>
-<property><name>dfs.data.dir</name><value>${hadoop.tmp.dir}/dfs/data</value></property>
-<property><name>fs.s3.maxRetries</name><value>4</value></property>
-<property><name>dfs.datanode.dns.interface</name><value>default</value></property>
-<property><name>dfs.support.append</name><value>false</value></property>
-<property><name>dfs.permissions.supergroup</name><value>supergroup</value></property>
-<property><name>mapred.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value></property>
-<property><name>fs.hftp.impl</name><value>org.apache.hadoop.hdfs.HftpFileSystem</value></property>
-<property><name>fs.trash.interval</name><value>0</value></property>
-<property><name>fs.s3.sleepTimeSeconds</name><value>10</value></property>
-<property><name>dfs.replication.min</name><value>1</value></property>
-<property><name>mapred.submit.replication</name><value>10</value></property>
-<property><name>fs.har.impl</name><value>org.apache.hadoop.fs.HarFileSystem</value></property>
-<property><name>mapred.map.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>mapred.tasktracker.dns.interface</name><value>default</value></property>
-<property><name>dfs.namenode.decommission.interval</name><value>3</value></property>
-<<<<<<< Updated upstream
-<property><name>dfs.http.address</name><value>localhost:55384</value></property>
-=======
-<property><name>dfs.http.address</name><value>localhost:50311</value></property>
->>>>>>> Stashed changes
-<property><name>dfs.heartbeat.interval</name><value>3</value></property>
-<property><name>mapred.job.tracker</name><value>local</value></property>
-<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
-<property><name>dfs.name.dir</name><value>build/test/data/dfs/name1,build/test/data/dfs/name2</value></property>
-<property><name>mapred.line.input.format.linespermap</name><value>1</value></property>
-<property><name>mapred.jobtracker.taskScheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value></property>
-<property><name>mapred.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value></property>
-<property><name>dfs.datanode.http.address</name><value>127.0.0.1:0</value></property>
-<property><name>mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</name><value>5000</value></property>
-<property><name>mapred.local.dir.minspacekill</name><value>0</value></property>
-<property><name>dfs.replication.interval</name><value>3</value></property>
-<property><name>io.sort.record.percent</name><value>0.05</value></property>
-<property><name>fs.kfs.impl</name><value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value></property>
-<property><name>mapred.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value></property>
-<property><name>mapred.tasktracker.reduce.tasks.maximum</name><value>2</value></property>
-<property><name>dfs.replication</name><value>2</value></property>
-<property><name>fs.checkpoint.edits.dir</name><value>${fs.checkpoint.dir}</value></property>
-<property><name>mapred.job.reduce.input.buffer.percent</name><value>0.0</value></property>
-<property><name>mapred.tasktracker.indexcache.mb</name><value>10</value></property>
-<property><name>hadoop.logfile.count</name><value>10</value></property>
-<property><name>mapred.skip.reduce.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.seqfile.compress.blocksize</name><value>1000000</value></property>
-<property><name>fs.s3.block.size</name><value>67108864</value></property>
-<property><name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name><value>5000</value></property>
-<property><name>mapred.acls.enabled</name><value>false</value></property>
-<property><name>mapred.queue.names</name><value>default</value></property>
-<property><name>dfs.access.time.precision</name><value>3600000</value></property>
-<property><name>fs.hsftp.impl</name><value>org.apache.hadoop.hdfs.HsftpFileSystem</value></property>
-<property><name>mapred.task.tracker.http.address</name><value>0.0.0.0:50060</value></property>
-<property><name>mapred.reduce.parallel.copies</name><value>5</value></property>
-<property><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><name>io.sort.mb</name><value>100</value></property>
-<property><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
-<property><name>mapred.compress.map.output</name><value>false</value></property>
-<property><name>mapred.task.tracker.report.address</name><value>127.0.0.1:0</value></property>
-<property><name>ipc.client.kill.max</name><value>10</value></property>
-<property><name>ipc.client.connect.max.retries</name><value>10</value></property>
-<property><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
-<property><name>mapred.job.tracker.http.address</name><value>0.0.0.0:50030</value></property>
-<property><name>io.file.buffer.size</name><value>4096</value></property>
-<property><name>mapred.jobtracker.restart.recover</name><value>false</value></property>
-<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization</value></property>
-<property><name>dfs.datanode.handler.count</name><value>3</value></property>
-<property><name>mapred.reduce.copy.backoff</name><value>300</value></property>
-<property><name>mapred.task.profile</name><value>false</value></property>
-<property><name>dfs.replication.considerLoad</name><value>true</value></property>
-<property><name>jobclient.output.filter</name><value>FAILED</value></property>
-<property><name>mapred.tasktracker.map.tasks.maximum</name><value>2</value></property>
-<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value></property>
-<property><name>fs.checkpoint.size</name><value>67108864</value></property>
-</configuration>
\ No newline at end of file
diff --git a/genomix/genomix-hadoop/actual5/conf.xml b/genomix/genomix-hadoop/actual5/conf.xml
deleted file mode 100644
index d19b061..0000000
--- a/genomix/genomix-hadoop/actual5/conf.xml
+++ /dev/null
@@ -1,179 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
-<property><name>fs.s3n.impl</name><value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value></property>
-<property><name>mapred.task.cache.levels</name><value>2</value></property>
-<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value></property>
-<property><name>hadoop.native.lib</name><value>true</value></property>
-<property><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value></property>
-<property><name>dfs.namenode.decommission.nodes.per.interval</name><value>5</value></property>
-<property><name>dfs.https.need.client.auth</name><value>false</value></property>
-<property><name>ipc.client.idlethreshold</name><value>4000</value></property>
-<property><name>mapred.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.hours</name><value>0</value></property>
-<property><name>dfs.namenode.logging.level</name><value>info</value></property>
-<property><name>dfs.datanode.address</name><value>127.0.0.1:0</value></property>
-<property><name>io.skip.checksum.errors</name><value>false</value></property>
-<property><name>fs.default.name</name><value>hdfs://localhost:58289</value></property>
-<property><name>mapred.child.tmp</name><value>./tmp</value></property>
-<property><name>fs.har.impl.disable.cache</name><value>true</value></property>
-<property><name>dfs.safemode.threshold.pct</name><value>0.999f</value></property>
-<property><name>mapred.skip.reduce.max.skip.groups</name><value>0</value></property>
-<property><name>dfs.namenode.handler.count</name><value>10</value></property>
-<property><name>dfs.blockreport.initialDelay</name><value>0</value></property>
-<property><name>mapred.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value></property>
-<property><name>mapred.tasktracker.dns.nameserver</name><value>default</value></property>
-<property><name>io.sort.factor</name><value>10</value></property>
-<property><name>mapred.task.timeout</name><value>600000</value></property>
-<property><name>mapred.max.tracker.failures</name><value>4</value></property>
-<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value></property>
-<property><name>fs.hdfs.impl</name><value>org.apache.hadoop.hdfs.DistributedFileSystem</value></property>
-<property><name>mapred.queue.default.acl-administer-jobs</name><value>*</value></property>
-<property><name>mapred.queue.default.acl-submit-job</name><value>*</value></property>
-<property><name>mapred.skip.map.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.mapfile.bloom.size</name><value>1048576</value></property>
-<property><name>dfs.safemode.extension</name><value>0</value></property>
-<property><name>tasktracker.http.threads</name><value>40</value></property>
-<property><name>mapred.job.shuffle.merge.percent</name><value>0.66</value></property>
-<property><name>fs.ftp.impl</name><value>org.apache.hadoop.fs.ftp.FTPFileSystem</value></property>
-<property><name>mapred.output.compress</name><value>false</value></property>
-<property><name>io.bytes.per.checksum</name><value>512</value></property>
-<property><name>topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.StaticMapping</value></property>
-<property><name>dfs.https.server.keystore.resource</name><value>ssl-server.xml</value></property>
-<property><name>mapred.reduce.slowstart.completed.maps</name><value>0.05</value></property>
-<property><name>mapred.reduce.max.attempts</name><value>4</value></property>
-<property><name>fs.ramfs.impl</name><value>org.apache.hadoop.fs.InMemoryFileSystem</value></property>
-<property><name>dfs.name.edits.dir</name><value>${dfs.name.dir}</value></property>
-<property><name>mapred.skip.map.max.skip.records</name><value>0</value></property>
-<property><name>hadoop.job.ugi</name><value>hadoop,staff,everyone,localaccounts</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value></property>
-<property><name>dfs.block.size</name><value>67108864</value></property>
-<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value></property>
-<property><name>job.end.retry.attempts</name><value>0</value></property>
-<property><name>fs.file.impl</name><value>org.apache.hadoop.fs.LocalFileSystem</value></property>
-<property><name>mapred.local.dir.minspacestart</name><value>0</value></property>
-<property><name>mapred.output.compression.type</name><value>RECORD</value></property>
-<property><name>dfs.datanode.ipc.address</name><value>127.0.0.1:0</value></property>
-<property><name>dfs.permissions</name><value>true</value></property>
-<property><name>topology.script.number.args</name><value>100</value></property>
-<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value></property>
-<property><name>mapred.max.tracker.blacklists</name><value>4</value></property>
-<property><name>mapred.task.profile.maps</name><value>0-2</value></property>
-<property><name>dfs.datanode.https.address</name><value>0.0.0.0:50475</value></property>
-<property><name>mapred.userlog.retain.hours</name><value>24</value></property>
-<property><name>dfs.secondary.http.address</name><value>0.0.0.0:50090</value></property>
-<property><name>dfs.replication.max</name><value>512</value></property>
-<property><name>mapred.job.tracker.persist.jobstatus.active</name><value>false</value></property>
-<property><name>hadoop.security.authorization</name><value>false</value></property>
-<property><name>local.cache.size</name><value>10737418240</value></property>
-<property><name>mapred.min.split.size</name><value>0</value></property>
-<property><name>mapred.map.tasks</name><value>2</value></property>
-<property><name>mapred.child.java.opts</name><value>-Xmx200m</value></property>
-<property><name>dfs.https.client.keystore.resource</name><value>ssl-client.xml</value></property>
-<property><name>dfs.namenode.startup</name><value>REGULAR</value></property>
-<property><name>mapred.job.queue.name</name><value>default</value></property>
-<property><name>dfs.https.address</name><value>0.0.0.0:50470</value></property>
-<property><name>dfs.balance.bandwidthPerSec</name><value>1048576</value></property>
-<property><name>ipc.server.listen.queue.size</name><value>128</value></property>
-<property><name>mapred.inmem.merge.threshold</name><value>1000</value></property>
-<property><name>job.end.retry.interval</name><value>30000</value></property>
-<property><name>mapred.skip.attempts.to.start.skipping</name><value>2</value></property>
-<property><name>fs.checkpoint.dir</name><value>build/test/data/dfs/namesecondary1,build/test/data/dfs/namesecondary2</value></property>
-<property><name>mapred.reduce.tasks</name><value>1</value></property>
-<property><name>mapred.merge.recordsBeforeProgress</name><value>10000</value></property>
-<property><name>mapred.userlog.limit.kb</name><value>0</value></property>
-<property><name>dfs.max.objects</name><value>0</value></property>
-<property><name>webinterface.private.actions</name><value>false</value></property>
-<property><name>io.sort.spill.percent</name><value>0.80</value></property>
-<property><name>mapred.job.shuffle.input.buffer.percent</name><value>0.70</value></property>
-<property><name>dfs.datanode.dns.nameserver</name><value>default</value></property>
-<property><name>mapred.map.tasks.speculative.execution</name><value>true</value></property>
-<property><name>hadoop.util.hash.type</name><value>murmur</value></property>
-<property><name>dfs.blockreport.intervalMsec</name><value>3600000</value></property>
-<property><name>mapred.map.max.attempts</name><value>4</value></property>
-<property><name>dfs.client.block.write.retries</name><value>3</value></property>
-<property><name>mapred.job.tracker.handler.count</name><value>10</value></property>
-<property><name>mapred.tasktracker.expiry.interval</name><value>600000</value></property>
-<property><name>dfs.https.enable</name><value>false</value></property>
-<property><name>mapred.jobtracker.maxtasks.per.job</name><value>-1</value></property>
-<property><name>mapred.jobtracker.job.history.block.size</name><value>3145728</value></property>
-<property><name>keep.failed.task.files</name><value>false</value></property>
-<property><name>ipc.client.tcpnodelay</name><value>false</value></property>
-<property><name>mapred.task.profile.reduces</name><value>0-2</value></property>
-<property><name>mapred.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>io.map.index.skip</name><value>0</value></property>
-<property><name>ipc.server.tcpnodelay</name><value>false</value></property>
-<property><name>dfs.default.chunk.view.size</name><value>32768</value></property>
-<property><name>hadoop.logfile.size</name><value>10000000</value></property>
-<property><name>mapred.reduce.tasks.speculative.execution</name><value>true</value></property>
-<property><name>dfs.datanode.du.reserved</name><value>0</value></property>
-<property><name>fs.checkpoint.period</name><value>3600</value></property>
-<property><name>dfs.web.ugi</name><value>webuser,webgroup</value></property>
-<property><name>mapred.job.reuse.jvm.num.tasks</name><value>1</value></property>
-<property><name>mapred.jobtracker.completeuserjobs.maximum</name><value>100</value></property>
-<property><name>dfs.df.interval</name><value>60000</value></property>
-<property><name>dfs.data.dir</name><value>${hadoop.tmp.dir}/dfs/data</value></property>
-<property><name>fs.s3.maxRetries</name><value>4</value></property>
-<property><name>dfs.datanode.dns.interface</name><value>default</value></property>
-<property><name>dfs.support.append</name><value>false</value></property>
-<property><name>dfs.permissions.supergroup</name><value>supergroup</value></property>
-<property><name>mapred.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value></property>
-<property><name>fs.hftp.impl</name><value>org.apache.hadoop.hdfs.HftpFileSystem</value></property>
-<property><name>fs.trash.interval</name><value>0</value></property>
-<property><name>fs.s3.sleepTimeSeconds</name><value>10</value></property>
-<property><name>dfs.replication.min</name><value>1</value></property>
-<property><name>mapred.submit.replication</name><value>10</value></property>
-<property><name>fs.har.impl</name><value>org.apache.hadoop.fs.HarFileSystem</value></property>
-<property><name>mapred.map.output.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
-<property><name>mapred.tasktracker.dns.interface</name><value>default</value></property>
-<property><name>dfs.namenode.decommission.interval</name><value>3</value></property>
-<property><name>dfs.http.address</name><value>localhost:58290</value></property>
-<property><name>dfs.heartbeat.interval</name><value>3</value></property>
-<property><name>mapred.job.tracker</name><value>local</value></property>
-<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
-<property><name>dfs.name.dir</name><value>build/test/data/dfs/name1,build/test/data/dfs/name2</value></property>
-<property><name>mapred.line.input.format.linespermap</name><value>1</value></property>
-<property><name>mapred.jobtracker.taskScheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value></property>
-<property><name>mapred.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value></property>
-<property><name>dfs.datanode.http.address</name><value>127.0.0.1:0</value></property>
-<property><name>mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill</name><value>5000</value></property>
-<property><name>mapred.local.dir.minspacekill</name><value>0</value></property>
-<property><name>dfs.replication.interval</name><value>3</value></property>
-<property><name>io.sort.record.percent</name><value>0.05</value></property>
-<property><name>fs.kfs.impl</name><value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value></property>
-<property><name>mapred.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value></property>
-<property><name>mapred.tasktracker.reduce.tasks.maximum</name><value>2</value></property>
-<property><name>dfs.replication</name><value>2</value></property>
-<property><name>fs.checkpoint.edits.dir</name><value>${fs.checkpoint.dir}</value></property>
-<property><name>mapred.job.reduce.input.buffer.percent</name><value>0.0</value></property>
-<property><name>mapred.tasktracker.indexcache.mb</name><value>10</value></property>
-<property><name>hadoop.logfile.count</name><value>10</value></property>
-<property><name>mapred.skip.reduce.auto.incr.proc.count</name><value>true</value></property>
-<property><name>io.seqfile.compress.blocksize</name><value>1000000</value></property>
-<property><name>fs.s3.block.size</name><value>67108864</value></property>
-<property><name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name><value>5000</value></property>
-<property><name>mapred.acls.enabled</name><value>false</value></property>
-<property><name>mapred.queue.names</name><value>default</value></property>
-<property><name>dfs.access.time.precision</name><value>3600000</value></property>
-<property><name>fs.hsftp.impl</name><value>org.apache.hadoop.hdfs.HsftpFileSystem</value></property>
-<property><name>mapred.task.tracker.http.address</name><value>0.0.0.0:50060</value></property>
-<property><name>mapred.reduce.parallel.copies</name><value>5</value></property>
-<property><name>io.seqfile.lazydecompress</name><value>true</value></property>
-<property><name>io.sort.mb</name><value>100</value></property>
-<property><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
-<property><name>mapred.compress.map.output</name><value>false</value></property>
-<property><name>mapred.task.tracker.report.address</name><value>127.0.0.1:0</value></property>
-<property><name>ipc.client.kill.max</name><value>10</value></property>
-<property><name>ipc.client.connect.max.retries</name><value>10</value></property>
-<property><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
-<property><name>mapred.job.tracker.http.address</name><value>0.0.0.0:50030</value></property>
-<property><name>io.file.buffer.size</name><value>4096</value></property>
-<property><name>mapred.jobtracker.restart.recover</name><value>false</value></property>
-<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization</value></property>
-<property><name>dfs.datanode.handler.count</name><value>3</value></property>
-<property><name>mapred.reduce.copy.backoff</name><value>300</value></property>
-<property><name>mapred.task.profile</name><value>false</value></property>
-<property><name>dfs.replication.considerLoad</name><value>true</value></property>
-<property><name>jobclient.output.filter</name><value>FAILED</value></property>
-<property><name>mapred.tasktracker.map.tasks.maximum</name><value>2</value></property>
-<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value></property>
-<property><name>fs.checkpoint.size</name><value>67108864</value></property>
-</configuration>
\ No newline at end of file
diff --git a/genomix/genomix-hadoop/actual5/result5/.part-00000.crc b/genomix/genomix-hadoop/actual5/result5/.part-00000.crc
deleted file mode 100644
index dafaae3..0000000
--- a/genomix/genomix-hadoop/actual5/result5/.part-00000.crc
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/actual5/result5/part-00000 b/genomix/genomix-hadoop/actual5/result5/part-00000
deleted file mode 100755
index deeff28..0000000
--- a/genomix/genomix-hadoop/actual5/result5/part-00000
+++ /dev/null
Binary files differ
diff --git a/genomix/genomix-hadoop/compare/result1/comparesource.txt b/genomix/genomix-hadoop/compare/result1/comparesource.txt
deleted file mode 100644
index ba52008..0000000
--- a/genomix/genomix-hadoop/compare/result1/comparesource.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-GCA	ACT|T	3
-AGC	|A	1
-CGC	T|AT	2
-TGC	|A	1
-ATC	C|G	2
-TCG	A|C	2
-CAT	G|C	2
-GCT	C|	1
diff --git a/genomix/genomix-hadoop/compare/result2/comparesource.txt b/genomix/genomix-hadoop/compare/result2/comparesource.txt
deleted file mode 100644
index db55a38..0000000
--- a/genomix/genomix-hadoop/compare/result2/comparesource.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-GCA	-72
-AGC	1
-CGC	-119
-TGC	1
-ATC	36
-TCG	18
-CAT	66
-GCT	32
diff --git a/genomix/genomix-hadoop/compare/result3/comparesource.txt b/genomix/genomix-hadoop/compare/result3/comparesource.txt
deleted file mode 100644
index ea6c574..0000000
--- a/genomix/genomix-hadoop/compare/result3/comparesource.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-CATC	68	1
-TCG	18	0
diff --git a/genomix/genomix-hadoop/compare/result5/comparesource.txt b/genomix/genomix-hadoop/compare/result5/comparesource.txt
deleted file mode 100644
index 6f4bd5e..0000000
--- a/genomix/genomix-hadoop/compare/result5/comparesource.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-GCA	AT|T	2
-AGC	|A	1
-TGC	|A	1
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathDriver.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathDriver.java
index 31995b6..36c12ae 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathDriver.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathDriver.java
@@ -115,7 +115,7 @@
                     MergePathValueWritable.class);
 
             MultipleOutputs.addNamedOutput(conf, complete,
-                    MergePathMultiTextOutputFormat.class, VKmerBytesWritable.class,
+                    MergePathMultiSeqOutputFormat.class, VKmerBytesWritable.class,
                     MergePathValueWritable.class);
             
             conf.setOutputKeyClass(VKmerBytesWritable.class);
@@ -152,11 +152,11 @@
         String complete = "complete" + iMerge;
        
         MultipleOutputs.addNamedOutput(conf, uncomplete,
-                MergePathMultiTextOutputFormat.class, VKmerBytesWritable.class,
+                MergePathMultiSeqOutputFormat.class, VKmerBytesWritable.class,
                 MergePathValueWritable.class);
 
         MultipleOutputs.addNamedOutput(conf, complete,
-                MergePathMultiTextOutputFormat.class, VKmerBytesWritable.class,
+                MergePathMultiSeqOutputFormat.class, VKmerBytesWritable.class,
                 MergePathValueWritable.class);
         
         conf.setOutputKeyClass(VKmerBytesWritable.class);
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMapper.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMapper.java
index fb55e18..0c7dcc1 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMapper.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMapper.java
@@ -59,7 +59,7 @@
             outputKmer.set(outputKmerFactory.shiftKmerWithNextCode(tmpKmer, succeedCode));
             
             tmpKmer.set(outputKmerFactory.getFirstKmerFromChain(key.getKmerLength() - (KMER_SIZE - 1), key));
-            outputValue.set(tmpKmer, adjBitMap, bitFlag);
+            outputValue.set(adjBitMap, bitFlag, tmpKmer);
             output.collect(outputKmer, outputValue);
         } else {
             outputKmer.set(key);
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMultiSeqOutputFormat.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMultiSeqOutputFormat.java
index 64fbb91..bd4cd2a 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMultiSeqOutputFormat.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathMultiSeqOutputFormat.java
@@ -17,9 +17,10 @@
 import java.io.File;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
+import edu.uci.ics.genomix.type.VKmerBytesWritable;
 
 
-public class MergePathMultiSeqOutputFormat extends MultipleSequenceFileOutputFormat<BytesWritable, MergePathValueWritable>{
+public class MergePathMultiSeqOutputFormat extends MultipleSequenceFileOutputFormat<VKmerBytesWritable, MergePathValueWritable>{
     @Override
     protected String generateLeafFileName(String name) {
         // TODO Auto-generated method stub System.out.println(name); 
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathReducer.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathReducer.java
index 921b233..52abc1c 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathReducer.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathReducer.java
@@ -54,69 +54,86 @@
             OutputCollector<VKmerBytesWritable, MergePathValueWritable> output, Reporter reporter) throws IOException {
         outputValue = values.next();
         if (values.hasNext() == true) {
-            if(outputValue.getFlag() != 1){
+            if (outputValue.getFlag() != 1) {
                 byte nextAdj = outputValue.getAdjBitMap();
                 byte succeed = (byte) 0x0F;
                 succeed = (byte) (succeed & nextAdj);
-                
+
                 outputValue = values.next();
                 byte adjBitMap = outputValue.getAdjBitMap();
-                byte flag = outputValue.getFlag();                
-                outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), key));
+                byte flag = outputValue.getFlag();
+                if (outputValue.getKmerLength() != 0)
+                    outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), key));
+                else
+                    outputKmer.set(key);
 
                 adjBitMap = (byte) (adjBitMap & 0xF0);
                 adjBitMap = (byte) (adjBitMap | succeed);
-                outputValue.set(null, 0, 0, adjBitMap, flag, 0);
+                outputValue.set(adjBitMap, flag, null);
                 mos.getCollector("uncomplete" + I_MERGE, reporter).collect(outputKmer, outputValue);
-            }
-            else{
+            } else {
                 tmpOutputValue.set(outputValue);
                 byte tmpAdjMap = tmpOutputValue.getAdjBitMap();
-                
+
                 outputValue = values.next();
-                if(outputValue.getFlag() != 1) {
-                    outputKmer.set(kmerFactory.mergeTwoKmer(tmpOutputValue.getKmer(), key));
-                    
+                if (outputValue.getFlag() != 1) {
+                    if (tmpOutputValue.getKmerLength() != 0)
+                        outputKmer.set(kmerFactory.mergeTwoKmer(tmpOutputValue.getKmer(), key));
+                    else
+                        outputKmer.set(key);
+
                     byte nextAdj = outputValue.getAdjBitMap();
                     byte succeed = (byte) 0x0F;
                     succeed = (byte) (succeed & nextAdj);
                     tmpAdjMap = (byte) (tmpAdjMap & 0xF0);
                     tmpAdjMap = (byte) (tmpAdjMap | succeed);
-                    outputValue.set(null, 0, 0, tmpAdjMap, tmpOutputValue.getFlag(), 0);
+                    outputValue.set(tmpAdjMap, tmpOutputValue.getFlag(), null);
                     mos.getCollector("uncomplete" + I_MERGE, reporter).collect(outputKmer, outputValue);
-                }
-                else{
-                    
-                    tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE-1, key));
-                    outputKmer.set(kmerFactory.mergeTwoKmer(tmpOutputValue.getKmer(), tmpKmer));
-                    tmpOutputValue.set(null, 0, 0, tmpAdjMap, tmpOutputValue.getFlag(), 0);
+                } else {
+
+                    tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE - 1, key));
+                    if (tmpOutputValue.getKmerLength() != 0)
+                        outputKmer.set(kmerFactory.mergeTwoKmer(tmpOutputValue.getKmer(), tmpKmer));
+                    else
+                        outputKmer.set(tmpKmer);
+                    tmpOutputValue.set(tmpAdjMap, tmpOutputValue.getFlag(), null);
                     mos.getCollector("complete" + I_MERGE, reporter).collect(outputKmer, tmpOutputValue);
-                    
-                    tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE-1, key));
-                    outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer));                    
-                    outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), outputValue.getFlag(), 0);
+
+                    tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE - 1, key));
+                    if (outputValue.getKmerLength() != 0)
+                        outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer));
+                    else
+                        outputKmer.set(tmpKmer);
+                    outputValue.set(outputValue.getAdjBitMap(), outputValue.getFlag(), null);
                     mos.getCollector("complete" + I_MERGE, reporter).collect(outputKmer, outputValue);
-                                        
-                    while(values.hasNext()) {
+
+                    while (values.hasNext()) {
                         outputValue = values.next();
-                        tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE-1, key));
-                        outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer));                    
-                        outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), outputValue.getFlag(), 0);
+                        tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE - 1, key));
+                        if (outputValue.getKmerLength() != 0)
+                            outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer));
+                        else
+                            outputKmer.set(tmpKmer);
+                        outputValue.set(outputValue.getAdjBitMap(), outputValue.getFlag(), null);
                         mos.getCollector("complete" + I_MERGE, reporter).collect(outputKmer, outputValue);
                     }
                 }
-            }                        
+            }
         } else {
             if (outputValue.getFlag() != 0) {
-                tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE-1, key));
-                outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer));                    
-                outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), outputValue.getFlag(), 0);
+                tmpKmer.set(kmerFactory.getFirstKmerFromChain(KMER_SIZE - 1, key));
+                if (outputValue.getKmerLength() != 0)
+                    outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer));
+                else
+                    outputKmer.set(tmpKmer);
+                outputValue.set(outputValue.getAdjBitMap(), outputValue.getFlag(), null);
                 mos.getCollector("complete" + I_MERGE, reporter).collect(outputKmer, outputValue);
 
             } else
                 mos.getCollector("uncomplete" + I_MERGE, reporter).collect(key, outputValue);
         }
     }
+
     public void close() throws IOException {
         // TODO Auto-generated method stub
         mos.close();
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathValueWritable.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathValueWritable.java
index 5f04565..9686c18 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathValueWritable.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/MergePathValueWritable.java
@@ -43,17 +43,12 @@
         kmer.set(bytes, 0, bytes.length);
     }
 
-    public void set (MergePathValueWritable right) {
-        set(right.getBytes(), 0, right.getLength(), right.getAdjBitMap(), right.getFlag(), right.getKmerLength());
-    }
-    public void set(KmerBytesWritable mergedKmer, byte adjBitMap, byte bitFlag) {
-        set(mergedKmer.getBytes(), 0, mergedKmer.getLength(), adjBitMap, bitFlag, mergedKmer.getKmerLength());
+    public void set(MergePathValueWritable right) {
+        set(right.getAdjBitMap(), right.getFlag(), right.getKmer());
     }
 
-    public void set(byte[] newData, int offset, int length, byte adjBitMap, byte flag, int kmerSize) {
-        if (length != 0) {
-            kmer.set(kmerSize, newData, offset, length);
-        }
+    public void set(byte adjBitMap, byte flag, VKmerBytesWritable kmer) {
+        this.kmer.set(kmer);
         this.adjBitMap = adjBitMap;
         this.flag = flag;
     }
@@ -69,21 +64,18 @@
     @Override
     public void write(DataOutput arg0) throws IOException {
         // TODO Auto-generated method stub
+
         kmer.write(arg0);
         arg0.writeByte(adjBitMap);
         arg0.writeByte(flag);
     }
 
-    public KmerBytesWritable getKmer() {
+    public VKmerBytesWritable getKmer() {
         if (kmer.getLength() != 0) {
             return kmer;
         }
         return null;
     }
-    
-    public int getKmerLength() {
-        return kmer.getKmerLength();
-    }
 
     public byte getAdjBitMap() {
         return this.adjBitMap;
@@ -104,8 +96,12 @@
             return kmer.getBytes();
         } else
             return null;
+
     }
 
+    public int getKmerLength() {
+        return kmer.getKmerLength();
+    }
 
     @Override
     public int getLength() {
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialMapper.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialMapper.java
index 617ecf0..1058fda 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialMapper.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialMapper.java
@@ -109,7 +109,7 @@
         if (inDegree == false && outDegree == false) {
             outputKmer.set(key);
             bitFlag = (byte) 2;
-            outputAdjList.set(null, 0, 0, adjBitMap, bitFlag, KMER_SIZE);///~~~~~kmersize----->0
+            outputAdjList.set(adjBitMap, bitFlag, null);///~~~~~kmersize----->0
             output.collect(outputKmer, outputAdjList);
         }
         else{
@@ -122,7 +122,7 @@
                     byte succeedCode = GeneCode.getGeneCodeFromBitMap(temp);
                     shiftedCode = key.shiftKmerWithNextCode(succeedCode);
                     outputKmer.set(key);
-                    outputAdjList.set(null, 0, 0, (byte)0, bitFlag, KMER_SIZE);
+                    outputAdjList.set((byte)0, bitFlag, null);
                     output.collect(outputKmer, outputAdjList);
                     key.shiftKmerWithPreCode(shiftedCode);
                 }
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialReducer.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialReducer.java
index 2723b47..07cc32f 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialReducer.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmerging/SNodeInitialReducer.java
@@ -38,7 +38,7 @@
         if (values.hasNext() == true) {
             if (outputValue.getFlag() == 2) {
                 byte bitFlag = 1;
-                outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), bitFlag, outputValue.getKmerLength());///outputValue.getKmerLength()
+                outputValue.set(outputValue.getAdjBitMap(), bitFlag, null);///outputValue.getKmerLength()
                 output.collect(outputKmer, outputValue);
             } else {
                 boolean flag = false;
@@ -51,14 +51,14 @@
                 }
                 if (flag == true) {
                     byte bitFlag = 1;
-                    outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), bitFlag, outputValue.getKmerLength());
+                    outputValue.set(outputValue.getAdjBitMap(), bitFlag, null);
                     output.collect(outputKmer, outputValue);
                 }
             }
         } else {
             if (outputValue.getFlag() == 2) {
                 byte bitFlag = 0;
-                outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), bitFlag, outputValue.getKmerLength());
+                outputValue.set(outputValue.getAdjBitMap(), bitFlag, null);
                 output.collect(outputKmer, outputValue);
             }
         }
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/ENodeInitialReducer.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/ENodeInitialReducer.java
index b955fcd..1f9bc82 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/ENodeInitialReducer.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/ENodeInitialReducer.java
@@ -16,7 +16,6 @@
     public BytesWritable outputKmer = new BytesWritable();
     public MergePathValueWritable outputAdjList = new MergePathValueWritable();
     
-
     @Override
     public void reduce(BytesWritable key, Iterator<MergePathValueWritable> values,
             OutputCollector<BytesWritable, MergePathValueWritable> output, Reporter reporter) throws IOException {
@@ -27,7 +26,7 @@
             bitFlag = (byte) (bitFlag & 0xFE);
             if (bitFlag == 2) {
                 bitFlag =  (byte) (0x80 | outputAdjList.getFlag());
-                outputAdjList.set(null, 0, 0, outputAdjList.getAdjBitMap(), bitFlag, outputAdjList.getKmerLength());
+                outputAdjList.set(outputAdjList.getAdjBitMap(), bitFlag, null);
                 output.collect(outputKmer, outputAdjList);
 
             } else {
@@ -41,7 +40,7 @@
                 }
                 if (flag == true) {
                     bitFlag =  (byte) (0x80 | outputAdjList.getFlag());
-                    outputAdjList.set(null, 0, 0, outputAdjList.getAdjBitMap(), bitFlag, outputAdjList.getKmerLength());
+                    outputAdjList.set(outputAdjList.getAdjBitMap(), bitFlag, null);
                     output.collect(outputKmer, outputAdjList);
                 }
             }
@@ -50,7 +49,7 @@
             bitFlag = (byte) (bitFlag & 0xFE);
             if (bitFlag == 2) {
                 bitFlag = 0;
-                outputAdjList.set(null, 0, 0, outputAdjList.getAdjBitMap(), bitFlag, outputAdjList.getKmerLength());
+                outputAdjList.set(outputAdjList.getAdjBitMap(), bitFlag, null);
                 output.collect(outputKmer, outputAdjList);
             } 
         }
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Driver.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Driver.java
index ca7f259..58849e2 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Driver.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Driver.java
@@ -18,6 +18,7 @@
 import edu.uci.ics.genomix.type.KmerBytesWritable;
 import edu.uci.ics.genomix.type.VKmerBytesWritable;
 
+
 @SuppressWarnings("deprecation")
 public class MergePathH2Driver {
     
@@ -60,22 +61,21 @@
         conf.setMapOutputValueClass(MergePathValueWritable.class);
         
         conf.setInputFormat(SequenceFileInputFormat.class);
-        conf.setOutputFormat(TextOutputFormat.class);
+        conf.setOutputFormat(SequenceFileOutputFormat.class);
         
         conf.setOutputKeyClass(VKmerBytesWritable.class);
         conf.setOutputValueClass(MergePathValueWritable.class);
         
         FileInputFormat.setInputPaths(conf, new Path(inputPath));
-        FileOutputFormat.setOutputPath(conf, new Path(outputPath));
+        FileOutputFormat.setOutputPath(conf, new Path(inputPath + "-step1"));
         conf.setNumReduceTasks(numReducers);
         FileSystem dfs = FileSystem.get(conf);
-        dfs.delete(new Path(outputPath), true);
+        dfs.delete(new Path(inputPath + "-step1"), true);
         JobClient.runJob(conf);
         int iMerge = 0;
 /*----------------------------------------------------------------------*/
-/*        for(iMerge = 0; iMerge < mergeRound; iMerge ++){
-        
-            conf = new JobConf(MergePathDriver.class);
+        for(iMerge = 0; iMerge < mergeRound; iMerge ++){
+            conf = new JobConf(MergePathH2Driver.class);
             conf.setInt("sizeKmer", sizeKmer);
             conf.setInt("iMerge", iMerge);
             
@@ -84,13 +84,18 @@
             }
             conf.setJobName("Path Merge");
             
-            conf.setMapperClass(MergePathMapper.class);
-            conf.setReducerClass(MergePathReducer.class);
+            conf.setMapperClass(MergePathH2Mapper.class);
+            conf.setReducerClass(MergePathH2Reducer.class);
             
             conf.setMapOutputKeyClass(VKmerBytesWritable.class);
-            conf.setMapOutputKeyClass(BytesWritable.class);
-
-<<<<<<< Updated upstream
+            conf.setMapOutputValueClass(MergePathValueWritable.class);
+            
+            conf.setInputFormat(SequenceFileInputFormat.class);
+            
+            String uncomplete = "uncomplete" + iMerge;
+            String complete = "complete" + iMerge;
+           
+            MultipleOutputs.addNamedOutput(conf, uncomplete,
                     MergePathMultiSeqOutputFormat.class, VKmerBytesWritable.class,
                     MergePathValueWritable.class);
 
@@ -109,10 +114,8 @@
             dfs.delete(new Path(inputPath + "-step1"), true);
             dfs.rename(new Path(outputPath + "/" + uncomplete), new Path(inputPath + "-step1"));
             dfs.rename(new Path(outputPath + "/" + complete), new Path(mergeResultPath + "/" + complete));
-<<<<<<< Updated upstream
         }
-
-        conf = new JobConf(MergePathDriver.class);
+        conf = new JobConf(MergePathH2Driver.class);
         conf.setInt("sizeKmer", sizeKmer);
         conf.setInt("iMerge", iMerge);
         
@@ -121,8 +124,8 @@
         }
         conf.setJobName("Path Merge");
         
-        conf.setMapperClass(MergePathMapper.class);
-        conf.setReducerClass(MergePathReducer.class);
+        conf.setMapperClass(MergePathH2Mapper.class);
+        conf.setReducerClass(MergePathH2Reducer.class);
         
         conf.setMapOutputKeyClass(VKmerBytesWritable.class);
         conf.setMapOutputValueClass(MergePathValueWritable.class);
@@ -150,7 +153,7 @@
         JobClient.runJob(conf);
         dfs.delete(new Path(inputPath + "-step1"), true);
         dfs.rename(new Path(outputPath + "/" + uncomplete), new Path(inputPath + "-step1"));
-        dfs.rename(new Path(outputPath + "/" + complete), new Path(mergeResultPath + "/" + complete));*/
+        dfs.rename(new Path(outputPath + "/" + complete), new Path(mergeResultPath + "/" + complete));
     }
 
     public static void main(String[] args) throws Exception {
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Mapper.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Mapper.java
index de7ecfc..6ea9dd3 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Mapper.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Mapper.java
@@ -48,15 +48,15 @@
 
                 tmpKmer.set(outputKmerFactory.getFirstKmerFromChain(key.getKmerLength() - (KMER_SIZE - 1), key));
                 bitFlag = (byte) (bitFlag | 0x08);
-                outputValue.set(tmpKmer, adjBitMap, bitFlag);
+                outputValue.set(adjBitMap, bitFlag, tmpKmer);
                 output.collect(outputKmer, outputValue);
                 break;
             case (byte) 0x80:
                 tmpKmer.set(outputKmerFactory.getFirstKmerFromChain(KMER_SIZE, key));
-                outputKmer.set(tmpKmer);//?????
+                outputKmer.set(tmpKmer);
                 tmpKmer.set(outputKmerFactory.getLastKmerFromChain(key.getKmerLength() - KMER_SIZE, key));
                 bitFlag = (byte) (bitFlag | 0x10);
-                outputValue.set(tmpKmer, adjBitMap, bitFlag);
+                outputValue.set(adjBitMap, bitFlag, tmpKmer);
                 output.collect(outputKmer, outputValue);
                 break;
             case (byte) 0x00:
@@ -66,20 +66,21 @@
 
                 tmpKmer.set(outputKmerFactory.getFirstKmerFromChain(key.getKmerLength() - (KMER_SIZE - 1), key));
                 bitFlag = (byte) (bitFlag | 0x08);
-                outputValue.set(tmpKmer, adjBitMap, bitFlag);
+                outputValue.set(adjBitMap, bitFlag, tmpKmer);
                 output.collect(outputKmer, outputValue);
 
                 bitFlag = (byte) (bitFlag & 0xF7);
                 tmpKmer.set(outputKmerFactory.getFirstKmerFromChain(KMER_SIZE, key));
-                outputKmer.set(tmpKmer);//?????
+                outputKmer.set(tmpKmer);
                 tmpKmer.set(outputKmerFactory.getLastKmerFromChain(key.getKmerLength() - KMER_SIZE, key));
                 bitFlag = (byte) (bitFlag | 0x10);
-                outputValue.set(tmpKmer, adjBitMap, bitFlag);
+                outputValue.set(adjBitMap, bitFlag, tmpKmer);
                 output.collect(outputKmer, outputValue);
                 break;
             case (byte) 0x81:
                 outputKmer.set(key);
-                outputValue.set(null, 0, 0, adjBitMap, bitFlag, 0);//????????
+                outputValue.set(adjBitMap, bitFlag, null);
+                output.collect(outputKmer, outputValue);
                 break;
         }
     }
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Reducer.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Reducer.java
index 5d5b872..ad8b3c2 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Reducer.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathH2Reducer.java
@@ -26,7 +26,6 @@
     MultipleOutputs mos = null;
     private int I_MERGE;
 
-
     public void configure(JobConf job) {
         mos = new MultipleOutputs(job);
         I_MERGE = Integer.parseInt(job.get("iMerge"));
@@ -43,58 +42,78 @@
     public void reduce(VKmerBytesWritable key, Iterator<MergePathValueWritable> values,
             OutputCollector<VKmerBytesWritable, MergePathValueWritable> output, Reporter reporter) throws IOException {
         outputValue = values.next();
+        outputKmer.set(key);
         if (values.hasNext() == true) {
             byte bitFlag = outputValue.getFlag();
             byte bitStartEnd = (byte) (0x81 & bitFlag);
             byte bitPosiNegative = (byte) (0x18 & bitFlag);
-            byte succeed = (byte) 0x0F;            
-
+            byte succeed = (byte) 0x0F;
             switch (bitPosiNegative) {
                 case (byte) 0x08:
-                    tmpKmer1.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), key));
-                byte adjBitMap = outputValue.getAdjBitMap();
+                    if (outputValue.getKmerLength() != 0)
+                        tmpKmer1.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), key));
+                    else
+                        tmpKmer1.set(key);
+                    byte adjBitMap = outputValue.getAdjBitMap();
                     outputValue = values.next();
-                    if (bitStartEnd == 0x80) {
-                        tmpKmer2.set(kmerFactory.mergeTwoKmer(key, outputValue.getKmer()));
-                        tmpOutputValue.set(null, 0, 0, outputValue.getAdjBitMap(), outputValue.getFlag(), 0);
+                    bitStartEnd = (byte) (0x81 & outputValue.getFlag());
+                    if (bitStartEnd == (byte) 0x80) {
+                        if (outputValue.getKmerLength() != 0)
+                            tmpKmer2.set(kmerFactory.mergeTwoKmer(key, outputValue.getKmer()));
+                        else
+                            tmpKmer2.set(key);
+                        byte tmpFlag = (byte) 0x80;
+                        tmpOutputValue.set(outputValue.getAdjBitMap(), outputValue.getFlag(), null);
                         mos.getCollector("uncomplete" + I_MERGE, reporter).collect(tmpKmer2, tmpOutputValue);
                     }
-
-                    outputKmer.set(kmerFactory.mergeTwoKmer(tmpKmer1, outputValue.getKmer()));
-                    succeed = (byte) (succeed & outputValue.getFlag());
+                    if (outputValue.getKmerLength() != 0)
+                        outputKmer.set(kmerFactory.mergeTwoKmer(tmpKmer1, outputValue.getKmer()));
+                    else
+                        outputKmer.set(tmpKmer1);
+                    succeed = (byte) (succeed & outputValue.getAdjBitMap());
                     adjBitMap = (byte) (adjBitMap & 0xF0);
                     adjBitMap = (byte) (adjBitMap | succeed);
                     byte outputFlag = (byte) (0x81 & bitFlag);
-                    outputFlag = (byte) (outputFlag & outputValue.getFlag());
-                    outputValue.set(null, 0, 0, adjBitMap, outputFlag, 0);
+                    outputFlag = (byte) (outputFlag | ((byte) 0x81 & outputValue.getFlag()));
+                    outputValue.set(adjBitMap, outputFlag, null);
                     mos.getCollector("uncomplete" + I_MERGE, reporter).collect(outputKmer, outputValue);
                     break;
                 case (byte) 0x10:
-                    tmpKmer1.set(kmerFactory.mergeTwoKmer(key, outputValue.getKmer()));
-                    if (bitStartEnd == 0x80) {
-                        tmpOutputValue.set(null, 0, 0, outputValue.getAdjBitMap(), outputValue.getFlag(), 0);
+                    if (outputValue.getKmerLength() != 0)
+                        tmpKmer1.set(kmerFactory.mergeTwoKmer(key, outputValue.getKmer()));
+                    else
+                        tmpKmer1.set(key);
+                    if (bitStartEnd == (byte) 0x80) {
+                        byte tmpFlag = (byte) 0x80;
+                        tmpOutputValue.set(outputValue.getAdjBitMap(), tmpFlag, null);
                         mos.getCollector("uncomplete" + I_MERGE, reporter).collect(tmpKmer1, tmpOutputValue);
                     }
-                    succeed = (byte) (succeed & outputValue.getFlag());
+                    succeed = (byte) (succeed & outputValue.getAdjBitMap());
                     outputValue = values.next();
-                    outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer1));
+                    if (outputValue.getKmerLength() != 0)
+                        outputKmer.set(kmerFactory.mergeTwoKmer(outputValue.getKmer(), tmpKmer1));
+                    else
+                        outputKmer.set(tmpKmer1);
                     adjBitMap = outputValue.getAdjBitMap();
                     adjBitMap = (byte) (adjBitMap & 0xF0);
-                    adjBitMap = (byte) (adjBitMap | succeed);
+                    adjBitMap = (byte) (adjBitMap | succeed);                    
                     outputFlag = (byte) (0x81 & bitFlag);
-                    outputFlag = (byte) (outputFlag & outputValue.getFlag());
-                    outputValue.set(null, 0, 0, adjBitMap, outputFlag, 0);
+                    outputFlag = (byte) (outputFlag | ((byte) 0x81 & outputValue.getFlag()));
+                    outputValue.set(adjBitMap, outputFlag, null);
                     mos.getCollector("uncomplete" + I_MERGE, reporter).collect(outputKmer, outputValue);
                     break;
             }
         } else {
             byte bitFlag = outputValue.getFlag();
             byte bitStartEnd = (byte) (0x81 & bitFlag);
-            if(bitStartEnd == 0x81) {
+            if (bitStartEnd == (byte) 0x81) {
                 outputKmer.set(key);
                 mos.getCollector("complete" + I_MERGE, reporter).collect(outputKmer, outputValue);
             }
-
         }
     }
+    public void close() throws IOException {
+        // TODO Auto-generated method stub
+        mos.close();
+    }
 }
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathMultiSeqOutputFormat.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathMultiSeqOutputFormat.java
index cbde512..5e6f008 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathMultiSeqOutputFormat.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathMultiSeqOutputFormat.java
@@ -18,8 +18,10 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat;
 
+import edu.uci.ics.genomix.type.VKmerBytesWritable;
 
-public class MergePathMultiSeqOutputFormat extends MultipleSequenceFileOutputFormat<BytesWritable, MergePathValueWritable>{
+
+public class MergePathMultiSeqOutputFormat extends MultipleSequenceFileOutputFormat<VKmerBytesWritable, MergePathValueWritable>{
     @Override
     protected String generateLeafFileName(String name) {
         // TODO Auto-generated method stub System.out.println(name); 
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathValueWritable.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathValueWritable.java
index 3faba46..2f1869d 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathValueWritable.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/MergePathValueWritable.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.io.WritableComparable;
 
 import edu.uci.ics.genomix.type.GeneCode;
-import edu.uci.ics.genomix.type.KmerBytesWritable;
 import edu.uci.ics.genomix.type.VKmerBytesWritable;
 
 public class MergePathValueWritable extends BinaryComparable implements WritableComparable<BinaryComparable> {
@@ -35,7 +34,13 @@
     public MergePathValueWritable() {
         this((byte) 0, (byte) 0, 0, EMPTY_BYTES);
     }
-
+    
+    public MergePathValueWritable(int k) {
+        this.adjBitMap = 0;
+        this.flag = 0;
+        this.kmer = new VKmerBytesWritable(k);
+    }
+    
     public MergePathValueWritable(byte adjBitMap, byte flag, int kmerSize, byte[] bytes) {
         this.adjBitMap = adjBitMap;
         this.flag = flag;
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialMapper.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialMapper.java
index 076b3d9..4c05dac 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialMapper.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialMapper.java
@@ -100,11 +100,10 @@
             int b = a;
         }
         if (inDegree == false && outDegree == false) {
-
             outputKmer.set(key);
             System.out.println(outputKmer.hashCode());
             bitFlag = (byte) 2;
-            outputAdjList.set(null, 0, 0, adjBitMap, bitFlag, 0);
+            outputAdjList.set(adjBitMap, bitFlag, null);
             output.collect(outputKmer, outputAdjList);
         } else {
             for (int i = 0; i < 4; i++) {
@@ -117,7 +116,7 @@
                     shiftedCode = key.shiftKmerWithPreCode(precurCode);
                     outputKmer.set(key);
                     bitFlag = (byte) 0x80;
-                    outputAdjList.set(null, 0, 0, (byte) 0, bitFlag, 0);
+                    outputAdjList.set((byte) 0, bitFlag, null);
                     output.collect(outputKmer, outputAdjList);
                     key.shiftKmerWithNextCode(shiftedCode);
                 }
@@ -132,7 +131,7 @@
                     shiftedCode = key.shiftKmerWithNextCode(succeedCode);
                     outputKmer.set(key);
                     bitFlag = (byte) 0x01;
-                    outputAdjList.set(null, 0, 0, (byte) 0, bitFlag, 0);
+                    outputAdjList.set((byte) 0, bitFlag, null);
                     output.collect(outputKmer, outputAdjList);
                     key.shiftKmerWithPreCode(shiftedCode);
                 }
diff --git a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialReducer.java b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialReducer.java
index a858524..7fd7a2e 100644
--- a/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialReducer.java
+++ b/genomix/genomix-hadoop/src/main/java/edu/uci/ics/pathmergingh2/SNodeInitialReducer.java
@@ -66,13 +66,13 @@
                 if(endFlag == (byte) 0x80) {
                     outputFlag = (byte) (outputFlag | endFlag);
                 }
-                outputValue.set(null, 0, 0, targetAdjList, outputFlag, 0);
+                outputValue.set(targetAdjList, outputFlag, null);
                 output.collect(outputKmer, outputValue);
             }
         } else {
             if (outputValue.getFlag() == 2) {
                 byte bitFlag = 0;
-                outputValue.set(null, 0, 0, outputValue.getAdjBitMap(), bitFlag, 0);
+                outputValue.set(outputValue.getAdjBitMap(), bitFlag, null);
                 output.collect(outputKmer, outputValue);
             }
         }
diff --git a/genomix/genomix-hadoop/src/test/java/edu/uci/ics/graphbuilding/GraphBuildingTest.java b/genomix/genomix-hadoop/src/test/java/edu/uci/ics/graphbuilding/GraphBuildingTest.java
index 44af11b..97b861c 100755
--- a/genomix/genomix-hadoop/src/test/java/edu/uci/ics/graphbuilding/GraphBuildingTest.java
+++ b/genomix/genomix-hadoop/src/test/java/edu/uci/ics/graphbuilding/GraphBuildingTest.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.Test;
 
+import edu.uci.ics.genomix.type.GeneCode;
 import edu.uci.ics.genomix.type.KmerBytesWritable;
 import edu.uci.ics.genomix.type.KmerCountValue;
 import edu.uci.ics.utils.TestUtils;
@@ -45,13 +46,14 @@
     private static final String COMPARE_DIR = "compare";
     private JobConf conf = new JobConf();
     private static final String HADOOP_CONF_PATH = ACTUAL_RESULT_DIR + File.separator + "conf.xml";
-    private static final String DATA_PATH = "data/webmap/Test.txt";
+    private static final String DATA_PATH = "data/webmap/BridgePath";
     private static final String HDFS_PATH = "/webmap";
     private static final String RESULT_PATH = "/result1";
     private static final String EXPECTED_PATH = "expected/result1";
     private static final String TEST_SOURCE_DIR = COMPARE_DIR + RESULT_PATH + "/comparesource.txt";
     private static final int COUNT_REDUCER = 4;
-    private static final int SIZE_KMER = 3;
+    private static final int SIZE_KMER = 5;
+    private static final String GRAPHVIZ = "Graphviz/GenomixSource.txt";
     
     private MiniDFSCluster dfsCluster;
     private MiniMRCluster mrCluster;
@@ -76,15 +78,35 @@
         KmerCountValue value = (KmerCountValue) ReflectionUtils.newInstance(reader.getValueClass(), conf);
         File filePathTo = new File(TEST_SOURCE_DIR);
         BufferedWriter bw = new BufferedWriter(new FileWriter(filePathTo));
+        File GraphViz = new File(GRAPHVIZ);
+        BufferedWriter bw2 = new BufferedWriter(new FileWriter(GraphViz));
         
         while (reader.next(key, value)) {
+            byte succeed = (byte) 0x0F;
+            byte adjBitMap = value.getAdjBitMap();
+            succeed = (byte) (succeed & adjBitMap);
+            byte shiftedCode = 0;
+            for(int i = 0 ; i < 4; i ++){
+                byte temp = 0x01;
+                temp  = (byte)(temp << i);
+                temp = (byte) (succeed & temp);
+                if(temp != 0 ){
+                    bw2.write(key.toString());
+                    bw2.newLine();                    
+                    byte succeedCode = GeneCode.getGeneCodeFromBitMap(temp);
+                    shiftedCode = key.shiftKmerWithNextCode(succeedCode);
+                    bw2.write(key.toString());
+                    bw2.newLine();
+                    key.shiftKmerWithPreCode(shiftedCode);
+                }
+            }
             bw.write(key.toString() + "\t" + value.toString());
-            bw.newLine();
+            bw.newLine();            
         }
         bw.close();
 
         dumpResult();
-        TestUtils.compareWithResult(new File(TEST_SOURCE_DIR), new File(EXPECTED_PATH));
+//        TestUtils.compareWithResult(new File(TEST_SOURCE_DIR), new File(EXPECTED_PATH));
 
         cleanupHadoop();
 
diff --git a/genomix/genomix-hadoop/src/test/java/edu/uci/ics/pathmergingh2/MergePathH2Test.java b/genomix/genomix-hadoop/src/test/java/edu/uci/ics/pathmergingh2/MergePathH2Test.java
index 01b3884..ff15299 100644
--- a/genomix/genomix-hadoop/src/test/java/edu/uci/ics/pathmergingh2/MergePathH2Test.java
+++ b/genomix/genomix-hadoop/src/test/java/edu/uci/ics/pathmergingh2/MergePathH2Test.java
@@ -48,11 +48,11 @@
         startHadoop();
 
         MergePathH2Driver tldriver = new MergePathH2Driver();
-        tldriver.run(HDFS_PATH, RESULT_PATH, HDFA_PATH_DATA, COUNT_REDUCER, SIZE_KMER, 0, HADOOP_CONF_PATH);
+        tldriver.run(HDFS_PATH, RESULT_PATH, HDFA_PATH_DATA, COUNT_REDUCER, SIZE_KMER, 1, HADOOP_CONF_PATH);
         
 /*        SequenceFile.Reader reader = null;
-        Path path = new Path(RESULT_PATH + "/part-00000");
-//        Path path = new Path(RESULT_PATH + "/uncomplete0" + "/uncomplete0-r-00000");
+//        Path path = new Path(RESULT_PATH + "/part-00000");
+        Path path = new Path(RESULT_PATH + "/uncomplete0" + "/uncomplete0-r-00000");
         reader = new SequenceFile.Reader(dfs, path, conf);
         VKmerBytesWritable key = (VKmerBytesWritable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
         MergePathValueWritable value = (MergePathValueWritable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
@@ -63,7 +63,7 @@
             bw.newLine();
         }
         bw.close();*/
-        dumpResult();
+//        dumpResult();
         
 //        TestUtils.compareWithResult(new File(TEST_SOURCE_DIR), new File(EXPECTED_PATH));