move hivesterix codebase into hyracks fullstack

git-svn-id: https://hyracks.googlecode.com/svn/branches/fullstack_staging@2420 123451ca-8445-de46-9d55-352943316053
diff --git a/hivesterix/src/test/resources/optimizerts/hive/conf/hive-default.xml b/hivesterix/src/test/resources/optimizerts/hive/conf/hive-default.xml
new file mode 100644
index 0000000..d5d0149
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/hive/conf/hive-default.xml
@@ -0,0 +1,793 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+	<!-- Hive Configuration can either be stored in this file or in the hadoop
+		configuration files that are implied by Hadoop setup variables. -->
+	<!-- Aside from Hadoop setup variables - this file is provided as a
+		convenience so that Hive users do not have to edit hadoop configuration
+		files (that may be managed as a centralized resource). -->
+
+	<!-- Hive Execution Parameters -->
+	<property>
+		<name>mapred.reduce.tasks</name>
+		<value>-1</value>
+		<description>The default number of reduce tasks per job. Typically set
+			to a prime close to the number of available hosts. Ignored when
+			mapred.job.tracker is "local". Hadoop sets this to 1 by default,
+			whereas Hive uses -1 as its default value. By setting this property
+			to -1, Hive will automatically figure out the number of reducers.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.hyracks.host</name>
+		<value>127.0.0.1</value>
+	</property>
+
+	<property>
+		<name>hive.hyracks.port</name>
+		<value>13099</value>
+	</property>
+
+	<property>
+		<name>hive.hyracks.app</name>
+		<value>hivesterix</value>
+	</property>
+
+
+	<property>
+		<name>hive.hyracks.parrallelism</name>
+		<value>2</value>
+	</property>
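+	<!-- The four hive.hyracks.* settings above are Hivesterix-specific: they
+		identify the Hyracks cluster controller (host and port), the deployed
+		application name, and the degree of parallelism. As a hedged sketch
+		(assuming the usual Hive set mechanism covers these variables, and
+		with a placeholder host), they can be overridden per session:
+			set hive.hyracks.host=10.1.0.1;
+			set hive.hyracks.parrallelism=4;
+	-->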
+
+	<property>
+		<name>hive.algebricks.groupby.external</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>hive.algebricks.groupby.external.memory</name>
+		<value>3072</value>
+	</property>
+
+	<property>
+		<name>hive.algebricks.sort.memory</name>
+		<value>3072</value>
+	</property>
+
+	<property>
+		<name>hive.algebricks.framesize</name>
+		<value>768</value>
+	</property>
+
+	<property>
+		<name>hive.exec.reducers.bytes.per.reducer</name>
+		<value>1000000000</value>
+		<description>Size per reducer. The default is 1G, i.e. if the input
+			size is 10G, it will use 10 reducers.</description>
+	</property>
+
+	<property>
+		<name>hive.exec.reducers.max</name>
+		<value>999</value>
+		<description>The maximum number of reducers that will be used. If the
+			value specified in the configuration parameter mapred.reduce.tasks
+			is negative, Hive will use this as the maximum number of reducers
+			when automatically determining the number of reducers.</description>
+	</property>
+
+	<property>
+		<name>hive.exec.scratchdir</name>
+		<value>/tmp/hive-${user.name}</value>
+		<description>Scratch space for Hive jobs</description>
+	</property>
+
+	<property>
+		<name>hive.test.mode</name>
+		<value>false</value>
+		<description>Whether Hive is running in test mode. If yes, it turns on
+			sampling and prefixes the output table name</description>
+	</property>
+
+	<property>
+		<name>hive.test.mode.prefix</name>
+		<value>test_</value>
+		<description>If Hive is running in test mode, prefixes the output
+			table name with this string</description>
+	</property>
+
+	<!-- If the input table is not bucketed, the denominator of the tablesample
+		is determined by the parameter below -->
+	<!-- For example, the following query: -->
+	<!-- INSERT OVERWRITE TABLE dest -->
+	<!-- SELECT col1 from src -->
+	<!-- would be converted to -->
+	<!-- INSERT OVERWRITE TABLE test_dest -->
+	<!-- SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
+	<property>
+		<name>hive.test.mode.samplefreq</name>
+		<value>32</value>
+		<description>If Hive is running in test mode and the table is not
+			bucketed, the sampling frequency</description>
+	</property>
+
+	<property>
+		<name>hive.test.mode.nosamplelist</name>
+		<value></value>
+		<description>If Hive is running in test mode, don't sample the tables
+			in the comma-separated list above</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.local</name>
+		<value>true</value>
+		<description>Controls whether to connect to a remote metastore server
+			or open a new metastore server in the Hive Client JVM</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.option.ConnectionURL</name>
+		<value>jdbc:derby:;databaseName=metastore_db;create=true</value>
+		<description>JDBC connect string for a JDBC metastore</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.option.ConnectionDriverName</name>
+		<value>org.apache.derby.jdbc.EmbeddedDriver</value>
+		<description>Driver class name for a JDBC metastore</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.PersistenceManagerFactoryClass</name>
+		<value>org.datanucleus.jdo.JDOPersistenceManagerFactory</value>
+		<description>class implementing the jdo persistence</description>
+	</property>
+
+	<property>
+		<name>datanucleus.connectionPoolingType</name>
+		<value>DBCP</value>
+		<description>Uses a DBCP connection pool for JDBC metastore
+		</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.option.DetachAllOnCommit</name>
+		<value>true</value>
+		<description>detaches all objects from session so that they can be
+			used after transaction is committed</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.option.NonTransactionalRead</name>
+		<value>true</value>
+		<description>reads outside of transactions</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.option.ConnectionUserName</name>
+		<value>APP</value>
+		<description>username to use against metastore database</description>
+	</property>
+
+	<property>
+		<name>javax.jdo.option.ConnectionPassword</name>
+		<value>mine</value>
+		<description>password to use against metastore database</description>
+	</property>
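+	<!-- A hedged sketch (hostname and driver are placeholders for a typical
+		setup, not part of this configuration): to back the metastore with an
+		external MySQL database instead of the embedded Derby above, one would
+		typically override:
+			javax.jdo.option.ConnectionURL = jdbc:mysql://dbhost/metastore?createDatabaseIfNotExist=true
+			javax.jdo.option.ConnectionDriverName = com.mysql.jdbc.Driver
+		along with the ConnectionUserName and ConnectionPassword values. -->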
+
+	<property>
+		<name>datanucleus.validateTables</name>
+		<value>false</value>
+		<description>validates existing schema against code. turn this on if
+			you want to verify existing schema </description>
+	</property>
+
+	<property>
+		<name>datanucleus.validateColumns</name>
+		<value>false</value>
+		<description>validates existing schema against code. turn this on if
+			you want to verify existing schema </description>
+	</property>
+
+	<property>
+		<name>datanucleus.validateConstraints</name>
+		<value>false</value>
+		<description>validates existing schema against code. turn this on if
+			you want to verify existing schema </description>
+	</property>
+
+	<property>
+		<name>datanucleus.storeManagerType</name>
+		<value>rdbms</value>
+		<description>metadata store type</description>
+	</property>
+
+	<property>
+		<name>datanucleus.autoCreateSchema</name>
+		<value>true</value>
+		<description>Creates the necessary schema on startup if one doesn't
+			exist. Set this to false after creating it once.</description>
+	</property>
+
+	<property>
+		<name>datanucleus.autoStartMechanismMode</name>
+		<value>checked</value>
+		<description>throw exception if metadata tables are incorrect
+		</description>
+	</property>
+
+	<property>
+		<name>datanucleus.transactionIsolation</name>
+		<value>read-committed</value>
+		<description>Default transaction isolation level for identity
+			generation. </description>
+	</property>
+
+	<property>
+		<name>datanucleus.cache.level2</name>
+		<value>false</value>
+		<description>Use a level 2 cache. Turn this off if metadata is changed
+			independently of hive metastore server</description>
+	</property>
+
+	<property>
+		<name>datanucleus.cache.level2.type</name>
+		<value>SOFT</value>
+		<description>SOFT=soft reference based cache, WEAK=weak reference
+			based cache.</description>
+	</property>
+
+	<property>
+		<name>datanucleus.identifierFactory</name>
+		<value>datanucleus</value>
+		<description>Name of the identifier factory to use when generating
+			table/column names etc. 'datanucleus' is used for backward
+			compatibility</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.warehouse.dir</name>
+		<value>/tmp/hivesterix</value>
+		<description>location of default database for the warehouse
+		</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.connect.retries</name>
+		<value>5</value>
+		<description>Number of retries while opening a connection to metastore
+		</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.rawstore.impl</name>
+		<value>org.apache.hadoop.hive.metastore.ObjectStore</value>
+		<description>Name of the class that implements
+			org.apache.hadoop.hive.metastore.rawstore interface. This class is
+			used for the storage and retrieval of raw metadata objects such as
+			table and database</description>
+	</property>
+
+	<property>
+		<name>hive.default.fileformat</name>
+		<value>TextFile</value>
+		<description>Default file format for CREATE TABLE statement. Options
+			are TextFile and SequenceFile. Users can explicitly say CREATE TABLE
+			... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
+	</property>
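+	<!-- For example (a minimal sketch; the table and columns are made up),
+		the default can be overridden per table at creation time:
+			CREATE TABLE page_views (viewtime INT, userid STRING)
+			STORED AS SEQUENCEFILE;
+	-->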
+
+	<property>
+		<name>hive.fileformat.check</name>
+		<value>true</value>
+		<description>Whether to check file format or not when loading data
+			files</description>
+	</property>
+
+	<property>
+		<name>hive.map.aggr</name>
+		<value>true</value>
+		<description>Whether to use map-side aggregation in Hive Group By
+			queries</description>
+	</property>
+
+	<property>
+		<name>hive.groupby.skewindata</name>
+		<value>false</value>
+		<description>Whether there is skew in data to optimize group by
+			queries</description>
+	</property>
+
+	<property>
+		<name>hive.groupby.mapaggr.checkinterval</name>
+		<value>100000</value>
+		<description>Number of rows after which the size of the grouping
+			keys/aggregation classes is checked</description>
+	</property>
+
+	<property>
+		<name>hive.mapred.local.mem</name>
+		<value>0</value>
+		<description>For local mode, memory of the mappers/reducers
+		</description>
+	</property>
+
+	<property>
+		<name>hive.map.aggr.hash.percentmemory</name>
+		<value>0.5</value>
+		<description>Portion of total memory to be used by map-side group
+			aggregation hash table</description>
+	</property>
+
+	<property>
+		<name>hive.map.aggr.hash.min.reduction</name>
+		<value>0.5</value>
+		<description>Hash aggregation will be turned off if the ratio between
+			hash table size and input rows is bigger than this number. Set to 1
+			to make sure hash aggregation is never turned off.</description>
+	</property>
+
+	<property>
+		<name>hive.optimize.cp</name>
+		<value>true</value>
+		<description>Whether to enable column pruner</description>
+	</property>
+
+	<property>
+		<name>hive.optimize.ppd</name>
+		<value>true</value>
+		<description>Whether to enable predicate pushdown</description>
+	</property>
+
+	<property>
+		<name>hive.optimize.pruner</name>
+		<value>true</value>
+		<description>Whether to enable the new partition pruner which depends
+			on predicate pushdown. If this is disabled, the old partition
+			pruner, which is based on AST, will be enabled.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.optimize.groupby</name>
+		<value>true</value>
+		<description>Whether to enable the bucketed group by from bucketed
+			partitions/tables.</description>
+	</property>
+
+	<property>
+		<name>hive.join.emit.interval</name>
+		<value>1000</value>
+		<description>How many rows in the right-most join operand Hive should
+			buffer before emitting the join result. </description>
+	</property>
+
+	<property>
+		<name>hive.join.cache.size</name>
+		<value>25000</value>
+		<description>How many rows in the joining tables (except the streaming
+			table) should be cached in memory. </description>
+	</property>
+
+	<property>
+		<name>hive.mapjoin.bucket.cache.size</name>
+		<value>100</value>
+		<description>How many values for each key in the map-joined table
+			should be cached in memory. </description>
+	</property>
+
+	<property>
+		<name>hive.mapjoin.maxsize</name>
+		<value>100000</value>
+		<description>Maximum # of rows of the small table that can be handled
+			by map-side join. If the size is reached and hive.task.progress is
+			set, a fatal error counter is set and the job will be killed.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.mapjoin.cache.numrows</name>
+		<value>25000</value>
+		<description>How many rows should be cached by jdbm for map join.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.optimize.skewjoin</name>
+		<value>false</value>
+		<description>Whether to enable skew join optimization. </description>
+	</property>
+
+	<property>
+		<name>hive.skewjoin.key</name>
+		<value>100000</value>
+		<description>Determine if we get a skew key in a join. If we see more
+			than the specified number of rows with the same key in the join
+			operator, we treat that key as a skew join key. </description>
+	</property>
+
+	<property>
+		<name>hive.skewjoin.mapjoin.map.tasks</name>
+		<value>10000</value>
+		<description>Determine the number of map tasks used in the follow-up
+			map join job for a skew join. It should be used together with
+			hive.skewjoin.mapjoin.min.split to perform fine-grained control.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.skewjoin.mapjoin.min.split</name>
+		<value>33554432</value>
+		<description>Determine the maximum number of map tasks used in the
+			follow-up map join job for a skew join by specifying the minimum
+			split size. It should be used together with
+			hive.skewjoin.mapjoin.map.tasks to perform fine-grained control.</description>
+	</property>
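+	<!-- A hedged usage sketch for the three skew-join settings above,
+		overriding them for a single session from the Hive CLI:
+			set hive.optimize.skewjoin=true;
+			set hive.skewjoin.key=500000;
+			set hive.skewjoin.mapjoin.min.split=33554432;
+	-->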
+
+	<property>
+		<name>hive.mapred.mode</name>
+		<value>nonstrict</value>
+		<description>The mode in which the hive operations are being
+			performed. In strict mode, some risky queries are not allowed to run
+		</description>
+	</property>
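+	<!-- An illustrative sketch based on Hive's documented strict-mode checks
+		(the table name is hypothetical): strict mode rejects risky queries
+		such as a scan of a partitioned table without a partition filter, a
+		Cartesian product join, and an ORDER BY without a LIMIT, e.g.
+			SELECT * FROM page_views ORDER BY viewtime;
+		fails under hive.mapred.mode=strict unless a LIMIT clause is added.
+	-->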
+
+	<property>
+		<name>hive.exec.script.maxerrsize</name>
+		<value>100000</value>
+		<description>Maximum number of bytes a script is allowed to emit to
+			standard error (per map-reduce task). This prevents runaway scripts
+			from filling log partitions to capacity </description>
+	</property>
+
+	<property>
+		<name>hive.exec.script.allow.partial.consumption</name>
+		<value>false</value>
+		<description> When enabled, this option allows a user script to exit
+			successfully without consuming all the data from the standard input.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.script.operator.id.env.var</name>
+		<value>HIVE_SCRIPT_OPERATOR_ID</value>
+		<description> Name of the environment variable that holds the unique
+			script operator ID in the user's transform function (the custom
+			mapper/reducer that the user has specified in the query)
+		</description>
+	</property>
+
+	<property>
+		<name>hive.exec.compress.output</name>
+		<value>false</value>
+		<description> This controls whether the final outputs of a query (to a
+			local/hdfs file or a hive table) are compressed. The compression
+			codec and other options are determined from hadoop config variables
+			mapred.output.compress* </description>
+	</property>
+
+	<property>
+		<name>hive.exec.compress.intermediate</name>
+		<value>false</value>
+		<description> This controls whether intermediate files produced by
+			hive between multiple map-reduce jobs are compressed. The compression
+			codec and other options are determined from hadoop config variables
+			mapred.output.compress* </description>
+	</property>
+
+	<property>
+		<name>hive.exec.parallel</name>
+		<value>false</value>
+		<description>Whether to execute jobs in parallel</description>
+	</property>
+
+	<property>
+		<name>hive.exec.parallel.thread.number</name>
+		<value>8</value>
+		<description>How many jobs at most can be executed in parallel
+		</description>
+	</property>
+
+	<property>
+		<name>hive.hwi.war.file</name>
+		<value>lib\hive-hwi-0.7.0.war</value>
+		<description>This sets the path to the HWI war file, relative to
+			${HIVE_HOME}. </description>
+	</property>
+
+	<property>
+		<name>hive.hwi.listen.host</name>
+		<value>0.0.0.0</value>
+		<description>This is the host address the Hive Web Interface will
+			listen on</description>
+	</property>
+
+	<property>
+		<name>hive.hwi.listen.port</name>
+		<value>9999</value>
+		<description>This is the port the Hive Web Interface will listen on
+		</description>
+	</property>
+
+	<property>
+		<name>hive.exec.pre.hooks</name>
+		<value></value>
+		<description>Pre Execute Hook for Tests</description>
+	</property>
+
+	<property>
+		<name>hive.merge.mapfiles</name>
+		<value>true</value>
+		<description>Merge small files at the end of a map-only job
+		</description>
+	</property>
+
+	<property>
+		<name>hive.merge.mapredfiles</name>
+		<value>false</value>
+		<description>Merge small files at the end of a map-reduce job
+		</description>
+	</property>
+
+	<property>
+		<name>hive.heartbeat.interval</name>
+		<value>1000</value>
+		<description>Send a heartbeat after this interval - used by mapjoin
+			and filter operators</description>
+	</property>
+
+	<property>
+		<name>hive.merge.size.per.task</name>
+		<value>256000000</value>
+		<description>Size of merged files at the end of the job</description>
+	</property>
+
+	<property>
+		<name>hive.merge.size.smallfiles.avgsize</name>
+		<value>16000000</value>
+		<description>When the average output file size of a job is less than
+			this number, Hive will start an additional map-reduce job to merge
+			the output files into bigger files. This is only done for map-only
+			jobs if hive.merge.mapfiles is true, and for map-reduce jobs if
+			hive.merge.mapredfiles is true.</description>
+	</property>
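+	<!-- A hedged sketch of how the merge settings above combine: to also
+		merge the small outputs of map-reduce jobs into files of roughly
+		hive.merge.size.per.task bytes, one might set, per session:
+			set hive.merge.mapredfiles=true;
+			set hive.merge.size.per.task=256000000;
+	-->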
+
+	<property>
+		<name>hive.script.auto.progress</name>
+		<value>false</value>
+		<description>Whether Hive Transform/Map/Reduce Clause should
+			automatically send progress information to TaskTracker to avoid the
+			task getting killed because of inactivity. Hive sends progress
+			information when the script is outputting to stderr. This option
+			removes the need to periodically produce stderr messages, but users
+			should be cautious because this may prevent infinite loops in the
+			scripts from being killed by TaskTracker.  </description>
+	</property>
+
+	<property>
+		<name>hive.script.serde</name>
+		<value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
+		<description>The default serde for transmitting input data to and
+			reading output data from the user scripts. </description>
+	</property>
+
+	<property>
+		<name>hive.script.recordreader</name>
+		<value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
+		<description>The default record reader for reading data from the user
+			scripts. </description>
+	</property>
+
+	<property>
+		<name>hive.script.recordwriter</name>
+		<value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
+		<description>The default record writer for writing data to the user
+			scripts. </description>
+	</property>
+
+	<property>
+		<name>hive.input.format</name>
+		<value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+		<description>The default input format; if it is not specified, the
+			system assigns it. It is set to HiveInputFormat for hadoop versions
+			17, 18 and 19, whereas it is set to CombinedHiveInputFormat for
+			hadoop 20. The user can always override it - if there is a bug in
+			CombinedHiveInputFormat, it can always be manually set to
+			HiveInputFormat. </description>
+	</property>
+
+	<property>
+		<name>hive.udtf.auto.progress</name>
+		<value>false</value>
+		<description>Whether Hive should automatically send progress
+			information to TaskTracker when using UDTF's to prevent the task
+			getting killed because of inactivity. Users should be cautious
+			because this may prevent TaskTracker from killing tasks with
+			infinite loops.  </description>
+	</property>
+
+	<property>
+		<name>hive.mapred.reduce.tasks.speculative.execution</name>
+		<value>true</value>
+		<description>Whether speculative execution for reducers should be
+			turned on. </description>
+	</property>
+
+	<property>
+		<name>hive.exec.counters.pull.interval</name>
+		<value>1000</value>
+		<description>The interval with which to poll the JobTracker for the
+			counters of the running job. The smaller it is, the more load there
+			will be on the jobtracker; the higher it is, the less granular the
+			caught data will be.</description>
+	</property>
+
+	<property>
+		<name>hive.enforce.bucketing</name>
+		<value>false</value>
+		<description>Whether bucketing is enforced. If true, while inserting
+			into the table, bucketing is enforced. </description>
+	</property>
+
+	<property>
+		<name>hive.enforce.sorting</name>
+		<value>false</value>
+		<description>Whether sorting is enforced. If true, while inserting
+			into the table, sorting is enforced. </description>
+	</property>
+
+	<property>
+		<name>hive.metastore.ds.connection.url.hook</name>
+		<value></value>
+		<description>Name of the hook to use for retrieving the JDO connection
+			URL. If empty, the value in javax.jdo.option.ConnectionURL is used
+		</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.ds.retry.attempts</name>
+		<value>1</value>
+		<description>The number of times to retry a metastore call if there
+			is a connection error</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.ds.retry.interval</name>
+		<value>1000</value>
+		<description>The number of milliseconds between metastore retry
+			attempts</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.server.min.threads</name>
+		<value>200</value>
+		<description>Minimum number of worker threads in the Thrift server's
+			pool.</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.server.max.threads</name>
+		<value>100000</value>
+		<description>Maximum number of worker threads in the Thrift server's
+			pool.</description>
+	</property>
+
+	<property>
+		<name>hive.metastore.server.tcp.keepalive</name>
+		<value>true</value>
+		<description>Whether to enable TCP keepalive for the metastore server.
+			Keepalive will prevent accumulation of half-open connections.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.optimize.reducededuplication</name>
+		<value>true</value>
+		<description>Remove extra map-reduce jobs if the data is already
+			clustered by the same key which needs to be used again. This should
+			always be set to true. Since it is a new feature, it has been made
+			configurable.</description>
+	</property>
+
+	<property>
+		<name>hive.exec.dynamic.partition</name>
+		<value>false</value>
+		<description>Whether or not to allow dynamic partitions in DML/DDL.
+		</description>
+	</property>
+
+	<property>
+		<name>hive.exec.dynamic.partition.mode</name>
+		<value>strict</value>
+		<description>In strict mode, the user must specify at least one static
+			partition in case the user accidentally overwrites all partitions.
+		</description>
+	</property>
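+	<!-- A hedged example of the strict-mode requirement (table and column
+		names are hypothetical): with dynamic partitioning enabled in strict
+		mode, at least one leading partition column must be static:
+			INSERT OVERWRITE TABLE sales PARTITION (country='US', state)
+			SELECT col1, col2, state FROM staged_sales;
+		In nonstrict mode, country could be a dynamic partition as well. -->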
+
+	<property>
+		<name>hive.exec.max.dynamic.partitions</name>
+		<value>1000</value>
+		<description>Maximum number of dynamic partitions allowed to be
+			created in total.</description>
+	</property>
+
+	<property>
+		<name>hive.exec.max.dynamic.partitions.pernode</name>
+		<value>100</value>
+		<description>Maximum number of dynamic partitions allowed to be
+			created in each mapper/reducer node.</description>
+	</property>
+
+	<property>
+		<name>hive.default.partition.name</name>
+		<value>__HIVE_DEFAULT_PARTITION__</value>
+		<description>The default partition name in case the dynamic partition
+			column value is null/empty string or any other value that cannot be
+			escaped. This value must not contain any special character used in
+			HDFS URI (e.g., ':', '%', '/' etc). The user has to be aware that
+			the dynamic partition value should not contain this value to avoid
+			confusion.</description>
+	</property>
+
+	<property>
+		<name>fs.har.impl</name>
+		<value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
+		<description>The implementation for accessing Hadoop Archives. Note
+			that this won't be applicable to Hadoop versions less than 0.20
+		</description>
+	</property>
+
+	<property>
+		<name>hive.archive.enabled</name>
+		<value>false</value>
+		<description>Whether archiving operations are permitted</description>
+	</property>
+
+	<property>
+		<name>hive.archive.har.parentdir.settable</name>
+		<value>false</value>
+		<description>In new Hadoop versions, the parent directory must be set
+			while creating a HAR. Because this functionality is hard to detect
+			with just version numbers, this conf var needs to be set manually.
+		</description>
+	</property>
+
+	<!-- HBase Storage Handler Parameters -->
+
+	<property>
+		<name>hive.hbase.wal.enabled</name>
+		<value>true</value>
+		<description>Whether writes to HBase should be forced to the
+			write-ahead log. Disabling this improves HBase write performance at
+			the risk of lost writes in case of a crash.</description>
+	</property>
+
+	<property>
+		<name>hive.exec.drop.ignorenonexistent</name>
+		<value>true</value>
+		<description>If enabled, DROP TABLE on a table that does not exist
+			succeeds instead of raising an error.</description>
+	</property>
+
+</configuration>
diff --git a/hivesterix/src/test/resources/optimizerts/ignore.txt b/hivesterix/src/test/resources/optimizerts/ignore.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/ignore.txt
diff --git a/hivesterix/src/test/resources/optimizerts/queries/h11_share_scan.hive b/hivesterix/src/test/resources/optimizerts/queries/h11_share_scan.hive
new file mode 100644
index 0000000..a5c46c6
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/h11_share_scan.hive
@@ -0,0 +1,10 @@
+-- union case: both subqueries are map jobs on same input, followed by filesink
+DROP TABLE IF EXISTS src;
+DROP TABLE IF EXISTS src1;
+DROP TABLE IF EXISTS src2;
+
+CREATE TABLE src(key int, value int);
+CREATE TABLE src1(key int, value int);
+CREATE TABLE src2(key int);
+
+FROM src
+INSERT overwrite table src1 select * where key < 5
+INSERT overwrite table src2 select key where key > 10;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/h12_select_struct.hive b/hivesterix/src/test/resources/optimizerts/queries/h12_select_struct.hive
new file mode 100644
index 0000000..24ca265
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/h12_select_struct.hive
@@ -0,0 +1,6 @@
+-- struct case: select a struct-typed column from the table
+DROP TABLE IF EXISTS src;
+
+CREATE TABLE src(key int, value struct<v1:int, v2:int>);
+
+select value from src;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q10_returned_item.hive b/hivesterix/src/test/resources/optimizerts/queries/q10_returned_item.hive
new file mode 100644
index 0000000..3f1214a
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q10_returned_item.hive
@@ -0,0 +1,37 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS q10_returned_item;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+
+-- create the result table
+create table q10_returned_item (c_custkey int, c_name string, revenue double, c_acctbal string, n_name string, c_address string, c_phone string, c_comment string);
+
+set mapred.min.split.size=536870912;
+set hive.exec.reducers.bytes.per.reducer=1024000000;
+
+-- the query
+insert overwrite table q10_returned_item
+select 
+  c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, 
+  c_acctbal, n_name, c_address, c_phone, c_comment
+from
+  customer c join orders o 
+  on 
+    c.c_custkey = o.o_custkey and o.o_orderdate >= '1993-10-01' and o.o_orderdate < '1994-01-01'
+  join nation n 
+  on 
+    c.c_nationkey = n.n_nationkey
+  join lineitem l 
+  on 
+    l.l_orderkey = o.o_orderkey and l.l_returnflag = 'R'
+group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment 
+order by revenue desc 
+limit 20;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q11_important_stock.hive b/hivesterix/src/test/resources/optimizerts/queries/q11_important_stock.hive
new file mode 100644
index 0000000..8550b72
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q11_important_stock.hive
@@ -0,0 +1,47 @@
+DROP TABLE IF EXISTS partsupp;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS q11_important_stock;
+DROP TABLE IF EXISTS q11_part_tmp;
+DROP TABLE IF EXISTS q11_sum_tmp;
+
+-- create tables and load data
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+create external table partsupp (PS_PARTKEY INT, PS_SUPPKEY INT, PS_AVAILQTY INT, PS_SUPPLYCOST DOUBLE, PS_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/partsupp';
+
+-- create the target table
+create table q11_important_stock(ps_partkey INT, value DOUBLE);
+create table q11_part_tmp(ps_partkey int, part_value double);
+create table q11_sum_tmp(total_value double);
+
+-- the query
+insert overwrite table q11_part_tmp
+select 
+  ps_partkey, sum(ps_supplycost * ps_availqty) as part_value 
+from
+  nation n join supplier s 
+  on 
+    s.s_nationkey = n.n_nationkey and n.n_name = 'GERMANY'
+  join partsupp ps 
+  on 
+    ps.ps_suppkey = s.s_suppkey
+group by ps_partkey;
+
+insert overwrite table q11_sum_tmp
+select 
+  sum(part_value) as total_value
+from 
+  q11_part_tmp;
+
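+-- join each part's value with the grand total (q11_sum_tmp has a single row,
+-- hence the unconditioned join) and keep the parts above the 0.0001 threshold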
+insert overwrite table q11_important_stock
+select 
+  ps_partkey, part_value as value
+from
+  (
+    select ps_partkey, part_value, total_value
+    from q11_part_tmp join q11_sum_tmp
+  ) a
+where part_value > total_value * 0.0001
+order by value desc;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q12_shipping.hive b/hivesterix/src/test/resources/optimizerts/queries/q12_shipping.hive
new file mode 100644
index 0000000..062f7b9
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q12_shipping.hive
@@ -0,0 +1,42 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS q12_shipping;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+
+-- create the result table
+create table q12_shipping(l_shipmode string, high_line_count double, low_line_count double);
+
+set mapred.min.split.size=536870912;
+set hive.exec.reducers.bytes.per.reducer=1225000000;
+
+-- the query
+insert overwrite table q12_shipping
+select 
+  l_shipmode,
+  sum(case
+    when o_orderpriority ='1-URGENT'
+         or o_orderpriority ='2-HIGH'
+    then 1
+    else 0
+end
+  ) as high_line_count,
+  sum(case
+    when o_orderpriority <> '1-URGENT'
+         and o_orderpriority <> '2-HIGH'
+    then 1
+    else 0
+end
+  ) as low_line_count
+from
+  orders o join lineitem l 
+  on 
+    o.o_orderkey = l.l_orderkey and l.l_commitdate < l.l_receiptdate
+and l.l_shipdate < l.l_commitdate and l.l_receiptdate >= '1994-01-01' 
+and l.l_receiptdate < '1995-01-01'
+where 
+  l.l_shipmode = 'MAIL' or l.l_shipmode = 'SHIP'
+group by l_shipmode
+order by l_shipmode;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q13_customer_distribution.hive b/hivesterix/src/test/resources/optimizerts/queries/q13_customer_distribution.hive
new file mode 100644
index 0000000..a799008
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q13_customer_distribution.hive
@@ -0,0 +1,27 @@
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS q13_customer_distribution;
+
+-- create the tables and load the data
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+
+-- create the result table
+create table q13_customer_distribution (c_count int, custdist int);
+
+-- the query
+insert overwrite table q13_customer_distribution
+select 
+  c_count, count(1) as custdist
+from 
+  (select 
+     c_custkey, count(o_orderkey) as c_count
+   from 
+     customer c left outer join orders o 
+     on 
+       c.c_custkey = o.o_custkey and not o.o_comment like '%special%requests%'
+   group by c_custkey
+   ) c_orders
+group by c_count
+order by custdist desc, c_count desc;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q14_promotion_effect.hive b/hivesterix/src/test/resources/optimizerts/queries/q14_promotion_effect.hive
new file mode 100644
index 0000000..988f400
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q14_promotion_effect.hive
@@ -0,0 +1,28 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS q14_promotion_effect;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+
+-- create the result table
+create table q14_promotion_effect(promo_revenue double);
+
+set mapred.min.split.size=536870912;
+set hive.exec.reducers.bytes.per.reducer=1040000000;
+
+-- the query
+insert overwrite table q14_promotion_effect
+select 
+  100.00 * sum(case
+               when p_type like 'PROMO%'
+               then l_extendedprice*(1-l_discount)
+               else 0.0
+               end
+  ) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+from 
+  part p join lineitem l 
+  on 
+    l.l_partkey = p.p_partkey and l.l_shipdate >= '1995-09-01' and l.l_shipdate < '1995-10-01';
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q15_top_supplier.hive b/hivesterix/src/test/resources/optimizerts/queries/q15_top_supplier.hive
new file mode 100644
index 0000000..04064ed
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q15_top_supplier.hive
@@ -0,0 +1,45 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS revenue;
+DROP TABLE IF EXISTS max_revenue;
+DROP TABLE IF EXISTS q15_top_supplier;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+
+-- create result tables
+create table revenue(supplier_no int, total_revenue double); 
+create table max_revenue(max_revenue double); 
+create table q15_top_supplier(s_suppkey int, s_name string, s_address string, s_phone string, total_revenue double);
+
+
+set mapred.min.split.size=536870912;
+
+-- the query
+insert overwrite table revenue
+select 
+  l_suppkey as supplier_no, sum(l_extendedprice * (1 - l_discount)) as total_revenue
+from 
+  lineitem
+where 
+  l_shipdate >= '1996-01-01' and l_shipdate < '1996-04-01'
+group by l_suppkey;
+
+insert overwrite table max_revenue
+select 
+  max(total_revenue)
+from 
+  revenue;
+
+insert overwrite table q15_top_supplier
+select 
+  s_suppkey, s_name, s_address, s_phone, total_revenue
+from supplier s join revenue r 
+  on 
+    s.s_suppkey = r.supplier_no
+  join max_revenue m 
+  on 
+    r.total_revenue = m.max_revenue
+order by s_suppkey;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q16_parts_supplier_relationship.hive b/hivesterix/src/test/resources/optimizerts/queries/q16_parts_supplier_relationship.hive
new file mode 100644
index 0000000..971ef99
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q16_parts_supplier_relationship.hive
@@ -0,0 +1,53 @@
+DROP TABLE IF EXISTS partsupp;
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS q16_parts_supplier_relationship;
+DROP TABLE IF EXISTS q16_tmp;
+DROP TABLE IF EXISTS supplier_tmp;
+
+-- create the tables and load the data
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+create external table partsupp (PS_PARTKEY INT, PS_SUPPKEY INT, PS_AVAILQTY INT, PS_SUPPLYCOST DOUBLE, PS_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/partsupp';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+
+-- create the result table
+create table q16_parts_supplier_relationship(p_brand string, p_type string, p_size int, supplier_cnt int);
+create table q16_tmp(p_brand string, p_type string, p_size int, ps_suppkey int);
+create table supplier_tmp(s_suppkey int);
+
+-- the query
+insert overwrite table supplier_tmp
+select 
+  s_suppkey
+from 
+  supplier
+where 
+  not s_comment like '%Customer%Complaints%';
+
+insert overwrite table q16_tmp
+select 
+  p_brand, p_type, p_size, ps_suppkey
+from 
+  partsupp ps join part p 
+  on 
+    p.p_partkey = ps.ps_partkey and p.p_brand <> 'Brand#45' 
+    and not p.p_type like 'MEDIUM POLISHED%'
+  join supplier_tmp s 
+  on 
+    ps.ps_suppkey = s.s_suppkey;
+
+insert overwrite table q16_parts_supplier_relationship
+select 
+  p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt
+from 
+  (select 
+     * 
+   from
+     q16_tmp 
+   where p_size = 49 or p_size = 14 or p_size = 23 or
+         p_size = 45 or p_size = 19 or p_size = 3 or
+         p_size = 36 or p_size = 9
+) q16_all
+group by p_brand, p_type, p_size
+order by supplier_cnt desc, p_brand, p_type, p_size;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q17_small_quantity_order_revenue.hive b/hivesterix/src/test/resources/optimizerts/queries/q17_small_quantity_order_revenue.hive
new file mode 100644
index 0000000..65291cd
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q17_small_quantity_order_revenue.hive
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS q17_small_quantity_order_revenue;
+DROP TABLE IF EXISTS lineitem_tmp;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+
+-- create the result table
+create table q17_small_quantity_order_revenue (avg_yearly double);
+create table lineitem_tmp (t_partkey int, t_avg_quantity double);
+
+-- the query
+insert overwrite table lineitem_tmp
+select 
+  l_partkey as t_partkey, 0.2 * avg(l_quantity) as t_avg_quantity
+from 
+  lineitem
+group by l_partkey;
+
+insert overwrite table q17_small_quantity_order_revenue
+select
+  sum(l_extendedprice) / 7.0 as avg_yearly
+from
+  (select l_quantity, l_extendedprice, t_avg_quantity from
+   lineitem_tmp t join
+     (select
+        l_quantity, l_partkey, l_extendedprice
+      from
+        part p join lineitem l
+        on
+          p.p_partkey = l.l_partkey
+          and p.p_brand = 'Brand#23'
+          and p.p_container = 'MED BOX'
+      ) l1 on l1.l_partkey = t.t_partkey
+   ) a
+where l_quantity < t_avg_quantity;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q18_large_volume_customer.hive b/hivesterix/src/test/resources/optimizerts/queries/q18_large_volume_customer.hive
new file mode 100644
index 0000000..76d0475
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q18_large_volume_customer.hive
@@ -0,0 +1,43 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS q18_tmp;
+DROP TABLE IF EXISTS q18_large_volume_customer;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+
+-- create the result tables
+create table q18_tmp(l_orderkey int, t_sum_quantity double);
+create table q18_large_volume_customer(c_name string, c_custkey int, o_orderkey int, o_orderdate string, o_totalprice double, sum_quantity double);
+
+set mapred.min.split.size=268435456;
+set hive.exec.reducers.bytes.per.reducer=1164000000;
+
+-- the query
+insert overwrite table q18_tmp
+select 
+  l_orderkey, sum(l_quantity) as t_sum_quantity
+from 
+  lineitem
+group by l_orderkey;
+
+insert overwrite table q18_large_volume_customer
+select 
+  c_name,c_custkey,o_orderkey,o_orderdate,o_totalprice,sum(l_quantity)
+from 
+  customer c join orders o 
+  on 
+    c.c_custkey = o.o_custkey
+  join q18_tmp t 
+  on 
+    o.o_orderkey = t.l_orderkey and t.t_sum_quantity > 300
+  join lineitem l 
+  on 
+    o.o_orderkey = l.l_orderkey
+group by c_name,c_custkey,o_orderkey,o_orderdate,o_totalprice
+order by o_totalprice desc,o_orderdate
+limit 100;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q19_discounted_revenue.hive b/hivesterix/src/test/resources/optimizerts/queries/q19_discounted_revenue.hive
new file mode 100644
index 0000000..fd330cd
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q19_discounted_revenue.hive
@@ -0,0 +1,49 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS q19_discounted_revenue;
+
+-- create the tables and load the data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+
+-- create the result table
+create table q19_discounted_revenue(revenue double);
+
+set mapred.min.split.size=268435456;
+set hive.exec.reducers.bytes.per.reducer=1040000000;
+
+-- the query
+insert overwrite table q19_discounted_revenue
+select
+  sum(l_extendedprice * (1 - l_discount) ) as revenue
+from
+  	part p join lineitem l
+  on 
+    p.p_partkey = l.l_partkey    
+where
+  (
+    p_brand = 'Brand#12'
+	and p_container REGEXP 'SM CASE||SM BOX||SM PACK||SM PKG'
+	and l_quantity >= 1 and l_quantity <= 11
+	and p_size >= 1 and p_size <= 5
+	and l_shipmode REGEXP 'AIR||AIR REG'
+	and l_shipinstruct = 'DELIVER IN PERSON'
+  ) 
+  or 
+  (
+    p_brand = 'Brand#23'
+	and p_container REGEXP 'MED BAG||MED BOX||MED PKG||MED PACK'
+	and l_quantity >= 10 and l_quantity <= 20
+	and p_size >= 1 and p_size <= 10
+	and l_shipmode REGEXP 'AIR||AIR REG'
+	and l_shipinstruct = 'DELIVER IN PERSON'
+  )
+  or
+  (
+	p_brand = 'Brand#34'
+	and p_container REGEXP 'LG CASE||LG BOX||LG PACK||LG PKG'
+	and l_quantity >= 20 and l_quantity <= 30
+	and p_size >= 1 and p_size <= 15
+	and l_shipmode REGEXP 'AIR||AIR REG'
+	and l_shipinstruct = 'DELIVER IN PERSON'
+  );
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q1_pricing_summary_report.hive b/hivesterix/src/test/resources/optimizerts/queries/q1_pricing_summary_report.hive
new file mode 100644
index 0000000..a002068
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q1_pricing_summary_report.hive
@@ -0,0 +1,21 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS q1_pricing_summary_report;
+
+-- create tables and load data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+
+-- create the target table
+CREATE TABLE q1_pricing_summary_report ( L_RETURNFLAG STRING, L_LINESTATUS STRING, SUM_QTY DOUBLE, SUM_BASE_PRICE DOUBLE, SUM_DISC_PRICE DOUBLE, SUM_CHARGE DOUBLE, AVE_QTY DOUBLE, AVE_PRICE DOUBLE, AVE_DISC DOUBLE, COUNT_ORDER INT);
+
+set mapred.min.split.size=536870912;
+
+-- the query
+INSERT OVERWRITE TABLE q1_pricing_summary_report 
+SELECT 
+  L_RETURNFLAG, L_LINESTATUS, SUM(L_QUANTITY), SUM(L_EXTENDEDPRICE), SUM(L_EXTENDEDPRICE*(1-L_DISCOUNT)), SUM(L_EXTENDEDPRICE*(1-L_DISCOUNT)*(1+L_TAX)), AVG(L_QUANTITY), AVG(L_EXTENDEDPRICE), AVG(L_DISCOUNT), COUNT(1) 
+FROM 
+  lineitem 
+WHERE 
+  L_SHIPDATE<='1998-09-02' 
+GROUP BY L_RETURNFLAG, L_LINESTATUS 
+ORDER BY L_RETURNFLAG, L_LINESTATUS;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q20_potential_part_promotion.hive b/hivesterix/src/test/resources/optimizerts/queries/q20_potential_part_promotion.hive
new file mode 100644
index 0000000..63297e6
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q20_potential_part_promotion.hive
@@ -0,0 +1,78 @@
+DROP TABLE IF EXISTS partsupp;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS q20_tmp1;
+DROP TABLE IF EXISTS q20_tmp2;
+DROP TABLE IF EXISTS q20_tmp3;
+DROP TABLE IF EXISTS q20_tmp4;
+DROP TABLE IF EXISTS q20_potential_part_promotion;
+
+-- create tables and load data
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+create external table partsupp (PS_PARTKEY INT, PS_SUPPKEY INT, PS_AVAILQTY INT, PS_SUPPLYCOST DOUBLE, PS_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/partsupp';
+
+-- create the target table
+create table q20_tmp1(p_partkey int);
+create table q20_tmp2(l_partkey int, l_suppkey int, sum_quantity double);
+create table q20_tmp3(ps_suppkey int, ps_availqty int, sum_quantity double);
+create table q20_tmp4(ps_suppkey int);
+create table q20_potential_part_promotion(s_name string, s_address string);
+
+set mapred.min.split.size=536870912;
+
+-- the query
+insert overwrite table q20_tmp1
+select distinct p_partkey
+from
+  part 
+where 
+  p_name like 'forest%';
+
+insert overwrite table q20_tmp2
+select 
+  l_partkey, l_suppkey, 0.5 * sum(l_quantity)
+from
+  lineitem
+where
+  l_shipdate >= '1994-01-01'
+  and l_shipdate < '1995-01-01'
+group by l_partkey, l_suppkey;
+
+insert overwrite table q20_tmp3
+select 
+  ps_suppkey, ps_availqty, sum_quantity
+from  
+  partsupp ps join q20_tmp1 t1 
+  on 
+    ps.ps_partkey = t1.p_partkey
+  join q20_tmp2 t2 
+  on 
+    ps.ps_partkey = t2.l_partkey and ps.ps_suppkey = t2.l_suppkey;
+
+insert overwrite table q20_tmp4
+select 
+  ps_suppkey
+from 
+  q20_tmp3
+where 
+  ps_availqty > sum_quantity
+group by ps_suppkey;
+
+insert overwrite table q20_potential_part_promotion
+select 
+  s_name, s_address
+from 
+  supplier s join nation n
+  on
+    s.s_nationkey = n.n_nationkey
+    and n.n_name = 'CANADA'
+  join q20_tmp4 t4
+  on 
+    s.s_suppkey = t4.ps_suppkey
+order by s_name;
+
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q21_suppliers_who_kept_orders_waiting.hive b/hivesterix/src/test/resources/optimizerts/queries/q21_suppliers_who_kept_orders_waiting.hive
new file mode 100644
index 0000000..a467f60
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q21_suppliers_who_kept_orders_waiting.hive
@@ -0,0 +1,74 @@
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS q21_tmp1;
+DROP TABLE IF EXISTS q21_tmp2;
+DROP TABLE IF EXISTS q21_suppliers_who_kept_orders_waiting;
+
+-- create tables and load data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+
+-- create target tables
+create table q21_tmp1(l_orderkey int, count_suppkey int, max_suppkey int);
+create table q21_tmp2(l_orderkey int, count_suppkey int, max_suppkey int);
+create table q21_suppliers_who_kept_orders_waiting(s_name string, numwait int);
+
+-- the query
+insert overwrite table q21_tmp1
+select
+  l_orderkey, count(distinct l_suppkey), max(l_suppkey) as max_suppkey
+from
+  lineitem
+group by l_orderkey;
+
+insert overwrite table q21_tmp2
+select
+  l_orderkey, count(distinct l_suppkey), max(l_suppkey) as max_suppkey
+from
+  lineitem
+where
+  l_receiptdate > l_commitdate
+group by l_orderkey;
+
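+-- final query: for 'F'-status orders involving multiple suppliers, count per
+-- supplier the orders where only that supplier's line items missed the commit
+-- date; the count/max columns emulate the EXISTS/NOT EXISTS subqueries of the
+-- original TPC-H Q21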
+insert overwrite table q21_suppliers_who_kept_orders_waiting
+select
+  s_name, count(1) as numwait
+from
+  (select s_name from
+(select s_name, t2.l_orderkey, l_suppkey, count_suppkey, max_suppkey 
+ from q21_tmp2 t2 right outer join
+      (select s_name, l_orderkey, l_suppkey from
+         (select s_name, t1.l_orderkey, l_suppkey, count_suppkey, max_suppkey
+          from
+            q21_tmp1 t1 join
+            (select s_name, l_orderkey, l_suppkey
+             from 
+               orders o join
+               (select s_name, l_orderkey, l_suppkey
+                from
+                  nation n join supplier s
+                  on
+                    s.s_nationkey = n.n_nationkey
+                    and n.n_name = 'SAUDI ARABIA'
+                  join lineitem l
+                  on
+                    s.s_suppkey = l.l_suppkey
+                where
+                  l.l_receiptdate > l.l_commitdate
+                ) l1 on o.o_orderkey = l1.l_orderkey and o.o_orderstatus = 'F'
+             ) l2 on l2.l_orderkey = t1.l_orderkey
+          ) a
+          where
+           (count_suppkey > 1) or ((count_suppkey=1) and (l_suppkey <> max_suppkey))
+       ) l3 on l3.l_orderkey = t2.l_orderkey
+    ) b
+    where
+     (count_suppkey is null) or ((count_suppkey=1) and (l_suppkey = max_suppkey))
+  ) c
+group by s_name
+order by numwait desc, s_name
+limit 100;
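
For reference: the temp-table rewrite above emulates TPC-H Q21's correlated EXISTS / NOT EXISTS subqueries, which the Hive dialect targeted here lacks. q21_tmp1 counts distinct suppliers per order (count_suppkey > 1 plays the role of EXISTS "another supplier on the same order"), and q21_tmp2 counts distinct late suppliers (a null or self-only match plays the role of NOT EXISTS "another late supplier"). A sketch of the reference formulation, assuming an engine with correlated subqueries:

select s_name, count(*) as numwait
from supplier, lineitem l1, orders, nation
where s_suppkey = l1.l_suppkey
  and o_orderkey = l1.l_orderkey
  and o_orderstatus = 'F'
  and l1.l_receiptdate > l1.l_commitdate
  -- some other supplier contributed a line item to the same order ...
  and exists (
    select * from lineitem l2
    where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey)
  -- ... but no other supplier was late on it
  and not exists (
    select * from lineitem l3
    where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey
      and l3.l_receiptdate > l3.l_commitdate)
  and s_nationkey = n_nationkey
  and n_name = 'SAUDI ARABIA'
group by s_name
order by numwait desc, s_name
limit 100;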
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q22_global_sales_opportunity.hive b/hivesterix/src/test/resources/optimizerts/queries/q22_global_sales_opportunity.hive
new file mode 100644
index 0000000..a7d6c72
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q22_global_sales_opportunity.hive
@@ -0,0 +1,70 @@
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS q22_customer_tmp;
+DROP TABLE IF EXISTS q22_customer_tmp1;
+DROP TABLE IF EXISTS q22_orders_tmp;
+DROP TABLE IF EXISTS q22_global_sales_opportunity;
+
+-- create tables and load data
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+
+-- create target tables
+create table q22_customer_tmp(c_acctbal double, c_custkey int, cntrycode string);
+create table q22_customer_tmp1(avg_acctbal double);
+create table q22_orders_tmp(o_custkey int);
+create table q22_global_sales_opportunity(cntrycode string, numcust int, totacctbal double);
+
+-- the query
+insert overwrite table q22_customer_tmp
+select 
+  c_acctbal, c_custkey, substr(c_phone, 1, 2) as cntrycode
+from 
+  customer
+where 
+  substr(c_phone, 1, 2) = '13' or
+  substr(c_phone, 1, 2) = '31' or
+  substr(c_phone, 1, 2) = '23' or
+  substr(c_phone, 1, 2) = '29' or
+  substr(c_phone, 1, 2) = '30' or
+  substr(c_phone, 1, 2) = '18' or
+  substr(c_phone, 1, 2) = '17';
+ 
+insert overwrite table q22_customer_tmp1
+select
+  avg(c_acctbal)
+from
+  q22_customer_tmp
+where
+  c_acctbal > 0.00;
+
+insert overwrite table q22_orders_tmp
+select 
+  o_custkey 
+from 
+  orders
+group by 
+  o_custkey;
+
+insert overwrite table q22_global_sales_opportunity
+select
+  cntrycode, count(1) as numcust, sum(c_acctbal) as totacctbal
+from
+(
+  select cntrycode, c_acctbal, avg_acctbal from
+  q22_customer_tmp1 ct1 join
+  (
+    select cntrycode, c_acctbal from
+      q22_orders_tmp ot 
+      right outer join q22_customer_tmp ct 
+      on
+        ct.c_custkey = ot.o_custkey
+    where
+      o_custkey is null
+  ) ct2
+) a
+where
+  c_acctbal > avg_acctbal
+group by cntrycode
+order by cntrycode;
+
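The right outer join plus "o_custkey is null" filter above is the standard anti-join rewrite of Q22's NOT EXISTS, and the join against the single-row q22_customer_tmp1 stands in for the scalar AVG subquery. For comparison, the reference formulation, assuming scalar and correlated subquery support:

select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal
from (
  select substr(c_phone, 1, 2) as cntrycode, c_acctbal
  from customer
  where substr(c_phone, 1, 2) in ('13', '31', '23', '29', '30', '18', '17')
    and c_acctbal > (
      select avg(c_acctbal) from customer
      where c_acctbal > 0.00
        and substr(c_phone, 1, 2) in ('13', '31', '23', '29', '30', '18', '17'))
    and not exists (select * from orders where o_custkey = c_custkey)
) custsale
group by cntrycode
order by cntrycode;
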
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q2_minimum_cost_supplier.hive b/hivesterix/src/test/resources/optimizerts/queries/q2_minimum_cost_supplier.hive
new file mode 100644
index 0000000..061c5e7
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q2_minimum_cost_supplier.hive
@@ -0,0 +1,56 @@
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS partsupp;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS region;
+DROP TABLE IF EXISTS q2_minimum_cost_supplier;
+DROP TABLE IF EXISTS q2_minimum_cost_supplier_tmp1;
+DROP TABLE IF EXISTS q2_minimum_cost_supplier_tmp2;
+
+-- create the tables and load the data
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table partsupp (PS_PARTKEY INT, PS_SUPPKEY INT, PS_AVAILQTY INT, PS_SUPPLYCOST DOUBLE, PS_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/partsupp';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+create external table region (R_REGIONKEY INT, R_NAME STRING, R_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/region';
+
+-- create result tables
+create table q2_minimum_cost_supplier_tmp1 (s_acctbal double, s_name string, n_name string, p_partkey int, ps_supplycost double, p_mfgr string, s_address string, s_phone string, s_comment string);
+create table q2_minimum_cost_supplier_tmp2 (p_partkey int, ps_min_supplycost double);
+create table q2_minimum_cost_supplier (s_acctbal double, s_name string, n_name string, p_partkey int, p_mfgr string, s_address string, s_phone string, s_comment string);
+
+-- the query
+insert overwrite table q2_minimum_cost_supplier_tmp1 
+select 
+  s.s_acctbal, s.s_name, n.n_name, p.p_partkey, ps.ps_supplycost, p.p_mfgr, s.s_address, s.s_phone, s.s_comment 
+from 
+  nation n join region r 
+  on 
+    n.n_regionkey = r.r_regionkey and r.r_name = 'EUROPE' 
+  join supplier s 
+  on 
+    s.s_nationkey = n.n_nationkey 
+  join partsupp ps 
+  on 
+    s.s_suppkey = ps.ps_suppkey 
+  join part p 
+  on 
+    p.p_partkey = ps.ps_partkey and p.p_size = 15 and p.p_type like '%BRASS';
+
+insert overwrite table q2_minimum_cost_supplier_tmp2 
+select 
+  p_partkey, min(ps_supplycost) 
+from  
+  q2_minimum_cost_supplier_tmp1 
+group by p_partkey;
+
+insert overwrite table q2_minimum_cost_supplier 
+select 
+  t1.s_acctbal, t1.s_name, t1.n_name, t1.p_partkey, t1.p_mfgr, t1.s_address, t1.s_phone, t1.s_comment 
+from 
+  q2_minimum_cost_supplier_tmp1 t1 join q2_minimum_cost_supplier_tmp2 t2 
+on 
+  t1.p_partkey = t2.p_partkey and t1.ps_supplycost=t2.ps_min_supplycost 
+order by s_acctbal desc, n_name, s_name, p_partkey 
+limit 100;
+
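q2_minimum_cost_supplier_tmp2 materializes the per-part minimum supply cost that the reference Q2 obtains from a correlated scalar subquery; re-joining on p_partkey with ps_supplycost = ps_min_supplycost applies the same filter. The reference shape, for comparison:

select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment
from part, supplier, partsupp, nation, region
where p_partkey = ps_partkey
  and s_suppkey = ps_suppkey
  and p_size = 15
  and p_type like '%BRASS'
  and s_nationkey = n_nationkey
  and n_regionkey = r_regionkey
  and r_name = 'EUROPE'
  and ps_supplycost = (
    select min(ps_supplycost)
    from partsupp, supplier, nation, region
    where p_partkey = ps_partkey
      and s_suppkey = ps_suppkey
      and s_nationkey = n_nationkey
      and n_regionkey = r_regionkey
      and r_name = 'EUROPE')
order by s_acctbal desc, n_name, s_name, p_partkey
limit 100;
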
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q3_shipping_priority.hive b/hivesterix/src/test/resources/optimizerts/queries/q3_shipping_priority.hive
new file mode 100644
index 0000000..0049eb3
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q3_shipping_priority.hive
@@ -0,0 +1,29 @@
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS q3_shipping_priority;
+
+-- create tables and load data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+
+-- create the target table
+create table q3_shipping_priority (l_orderkey int, revenue double, o_orderdate string, o_shippriority int);
+
+set mapred.min.split.size=536870912;
+set hive.exec.reducers.bytes.per.reducer=1024000000;
+
+-- the query
+insert overwrite table q3_shipping_priority 
+select 
+  l_orderkey, (l_extendedprice*(1-l_discount)) as revenue, o_orderdate, o_shippriority 
+from 
+  customer c join orders o 
+    on c.c_mktsegment = 'BUILDING' and c.c_custkey = o.o_custkey 
+  join lineitem l 
+    on l.l_orderkey = o.o_orderkey and l.l_linenumber<3
+-- group by l_orderkey, o_orderdate, o_shippriority 
+order by revenue desc
+limit 10;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q4_order_priority.hive b/hivesterix/src/test/resources/optimizerts/queries/q4_order_priority.hive
new file mode 100644
index 0000000..aa828e9
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q4_order_priority.hive
@@ -0,0 +1,30 @@
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS q4_order_priority_tmp;
+DROP TABLE IF EXISTS q4_order_priority;
+
+-- create tables and load data
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+
+-- create the target table
+CREATE TABLE q4_order_priority_tmp (O_ORDERKEY INT);
+CREATE TABLE q4_order_priority (O_ORDERPRIORITY STRING, ORDER_COUNT INT);
+
+set mapred.min.split.size=536870912;
+-- the query
+INSERT OVERWRITE TABLE q4_order_priority_tmp 
+select 
+  DISTINCT l_orderkey 
+from 
+  lineitem 
+where 
+  l_commitdate < l_receiptdate;
+INSERT OVERWRITE TABLE q4_order_priority 
+select o_orderpriority, count(1) as order_count 
+from 
+  orders o join q4_order_priority_tmp t 
+  on 
+    o.o_orderkey = t.o_orderkey and o.o_orderdate >= '1993-07-01' and o.o_orderdate < '1993-10-01' 
+group by o_orderpriority 
+order by o_orderpriority;
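
The DISTINCT temp table replaces the reference Q4's correlated EXISTS: an order qualifies once if at least one of its line items was received after its commit date, no matter how many were. The reference form, assuming EXISTS support:

select o_orderpriority, count(*) as order_count
from orders
where o_orderdate >= '1993-07-01'
  and o_orderdate < '1993-10-01'
  and exists (
    select * from lineitem
    where l_orderkey = o_orderkey and l_commitdate < l_receiptdate)
group by o_orderpriority
order by o_orderpriority;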
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q5_local_supplier_volume.hive b/hivesterix/src/test/resources/optimizerts/queries/q5_local_supplier_volume.hive
new file mode 100644
index 0000000..a975ce1
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q5_local_supplier_volume.hive
@@ -0,0 +1,42 @@
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS region;
+DROP TABLE IF EXISTS q5_local_supplier_volume;
+
+-- create tables and load data
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+create external table region (R_REGIONKEY INT, R_NAME STRING, R_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/region';
+
+-- create the target table
+create table q5_local_supplier_volume (N_NAME STRING, REVENUE DOUBLE);
+
+set mapred.min.split.size=536870912;
+
+-- the query
+insert overwrite table q5_local_supplier_volume 
+select 
+  n_name, sum(l_extendedprice * (1 - l_discount)) as revenue 
+from
+  customer c join
+    ( select n_name, l_extendedprice, l_discount, s_nationkey, o_custkey from orders o join
+      ( select n_name, l_extendedprice, l_discount, l_orderkey, s_nationkey from lineitem l join
+        ( select n_name, s_suppkey, s_nationkey from supplier s join
+          ( select n_name, n_nationkey 
+            from nation n join region r 
+            on n.n_regionkey = r.r_regionkey and r.r_name = 'ASIA'
+          ) n1 on s.s_nationkey = n1.n_nationkey
+        ) s1 on l.l_suppkey = s1.s_suppkey
+      ) l1 on l1.l_orderkey = o.o_orderkey and o.o_orderdate >= '1994-01-01' 
+              and o.o_orderdate < '1995-01-01'
+) o1 
+on c.c_nationkey = o1.s_nationkey and c.c_custkey = o1.o_custkey
+group by n_name 
+order by revenue desc;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q6_forecast_revenue_change.hive b/hivesterix/src/test/resources/optimizerts/queries/q6_forecast_revenue_change.hive
new file mode 100644
index 0000000..d8cb9b9
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q6_forecast_revenue_change.hive
@@ -0,0 +1,21 @@
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS q6_forecast_revenue_change;
+
+-- create tables and load data
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+
+-- create the target table
+create table q6_forecast_revenue_change (revenue double);
+
+-- the query
+insert overwrite table q6_forecast_revenue_change 
+select 
+  sum(l_extendedprice*l_discount) as revenue
+from 
+  lineitem
+where 
+  l_shipdate >= '1994-01-01'
+  and l_shipdate < '1995-01-01'
+  and l_discount >= 0.05 and l_discount <= 0.07
+  and l_quantity < 24;
+
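This one maps onto reference Q6 almost verbatim; only the BETWEEN predicate is expanded into two explicit comparisons. The equivalent form, assuming a Hive version that supports BETWEEN:

select sum(l_extendedprice * l_discount) as revenue
from lineitem
where l_shipdate >= '1994-01-01'
  and l_shipdate < '1995-01-01'
  and l_discount between 0.05 and 0.07
  and l_quantity < 24;
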
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q7_volume_shipping.hive b/hivesterix/src/test/resources/optimizerts/queries/q7_volume_shipping.hive
new file mode 100644
index 0000000..3dfb22a
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q7_volume_shipping.hive
@@ -0,0 +1,71 @@
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS q7_volume_shipping;
+DROP TABLE IF EXISTS q7_volume_shipping_tmp;
+
+-- create tables and load data
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+
+-- create the target table
+create table q7_volume_shipping (supp_nation string, cust_nation string, l_year int, revenue double);
+create table q7_volume_shipping_tmp(supp_nation string, cust_nation string, s_nationkey int, c_nationkey int);
+
+set mapred.min.split.size=536870912;
+set hive.exec.reducers.bytes.per.reducer=1225000000;
+
+-- the query
+insert overwrite table q7_volume_shipping_tmp
+select 
+  * 
+from
+  (
+    select 
+      n1.n_name as supp_nation, n2.n_name as cust_nation, n1.n_nationkey as s_nationkey,      
+      n2.n_nationkey as c_nationkey
+    from 
+      nation n1 join nation n2 
+      on 
+        n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY'
+    UNION ALL
+    select 
+      n1.n_name as supp_nation, n2.n_name as cust_nation, n1.n_nationkey as s_nationkey, 
+      n2.n_nationkey as c_nationkey
+    from 
+      nation n1 join nation n2 
+      on 
+        n2.n_name = 'FRANCE' and n1.n_name = 'GERMANY'
+  ) a;
+
+insert overwrite table q7_volume_shipping 
+select 
+  supp_nation, cust_nation, l_year, sum(volume) as revenue
+from 
+  (
+    select
+      supp_nation, cust_nation, year(l_shipdate) as l_year, 
+      l_extendedprice * (1 - l_discount) as volume
+    from
+      q7_volume_shipping_tmp t join
+        (select l_shipdate, l_extendedprice, l_discount, c_nationkey, s_nationkey 
+         from supplier s join
+           (select l_shipdate, l_extendedprice, l_discount, l_suppkey, c_nationkey 
+            from customer c join
+              (select l_shipdate, l_extendedprice, l_discount, l_suppkey, o_custkey 
+               from orders o join lineitem l 
+               on 
+                 o.o_orderkey = l.l_orderkey and l.l_shipdate >= '1995-01-01' 
+                 and l.l_shipdate <= '1996-12-31'
+               ) l1 on c.c_custkey = l1.o_custkey
+            ) l2 on s.s_suppkey = l2.l_suppkey
+         ) l3 on l3.c_nationkey = t.c_nationkey and l3.s_nationkey = t.s_nationkey
+   ) shipping
+group by supp_nation, cust_nation, l_year
+order by supp_nation, cust_nation, l_year;
+
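q7_volume_shipping_tmp pre-materializes the two admissible (supp_nation, cust_nation) pairs so the main query needs only equi-joins on the nation keys; the reference Q7 instead states the nation pairing as an inline disjunction, which would defeat hash-join planning. The reference shape, for comparison:

select supp_nation, cust_nation, l_year, sum(volume) as revenue
from (
  select n1.n_name as supp_nation, n2.n_name as cust_nation,
         year(l_shipdate) as l_year,
         l_extendedprice * (1 - l_discount) as volume
  from supplier s, lineitem l, orders o, customer c, nation n1, nation n2
  where s.s_suppkey = l.l_suppkey
    and o.o_orderkey = l.l_orderkey
    and c.c_custkey = o.o_custkey
    and s.s_nationkey = n1.n_nationkey
    and c.c_nationkey = n2.n_nationkey
    and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY')
      or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE'))
    and l.l_shipdate between '1995-01-01' and '1996-12-31'
) shipping
group by supp_nation, cust_nation, l_year
order by supp_nation, cust_nation, l_year;
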
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q8_national_market_share.hive b/hivesterix/src/test/resources/optimizerts/queries/q8_national_market_share.hive
new file mode 100644
index 0000000..5e7baaa
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q8_national_market_share.hive
@@ -0,0 +1,56 @@
+DROP TABLE IF EXISTS customer;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS region;
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS q8_national_market_share;
+
+-- create the tables and load the data
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+create external table region (R_REGIONKEY INT, R_NAME STRING, R_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/region';
+
+-- create the result table
+create table q8_national_market_share(o_year string, mkt_share double);
+
+-- the query
+insert overwrite table q8_national_market_share 
+select 
+  o_year, sum(case when nation = 'BRAZIL' then volume else 0.0 end) / sum(volume) as mkt_share
+from 
+  (
+    select 
+      year(o_orderdate) as o_year, l_extendedprice * (1-l_discount) as volume, 
+      n2.n_name as nation
+    from
+      nation n2 join
+        (select o_orderdate, l_discount, l_extendedprice, s_nationkey 
+         from supplier s join
+          (select o_orderdate, l_discount, l_extendedprice, l_suppkey 
+           from part p join
+             (select o_orderdate, l_partkey, l_discount, l_extendedprice, l_suppkey 
+              from 
+                (select o_orderdate, o_orderkey 
+                 from orders o join
+                   (select c.c_custkey 
+                    from customer c join
+                      (select n1.n_nationkey 
+                       from nation n1 join region r
+                       on n1.n_regionkey = r.r_regionkey and r.r_name = 'AMERICA'
+                       ) n11 on c.c_nationkey = n11.n_nationkey
+                    ) c1 on c1.c_custkey = o.o_custkey
+                 ) o1 join lineitem l on l.l_orderkey = o1.o_orderkey and o1.o_orderdate >= '1995-01-01' 
+                         and o1.o_orderdate < '1996-12-31'
+              ) l1 on p.p_partkey = l1.l_partkey and p.p_type = 'ECONOMY ANODIZED STEEL'
+           ) p1 on s.s_suppkey = p1.l_suppkey
+        ) s1 on s1.s_nationkey = n2.n_nationkey
+  ) all_nation
+group by o_year
+order by o_year;
+
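The mkt_share expression is conditional aggregation in a single pass: volume is summed twice, once gated on nation = 'BRAZIL' through CASE and once unconditionally, and their ratio gives the Brazilian market share per year. (Note the upper date bound here is exclusive, < '1996-12-31', where the reference Q8 is inclusive.) In isolation, over a hypothetical pre-joined input:

select o_year,
  sum(case when nation = 'BRAZIL' then volume else 0.0 end) / sum(volume) as mkt_share
from all_nation_volumes  -- hypothetical stand-in for the nested joins above
group by o_year
order by o_year;
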
diff --git a/hivesterix/src/test/resources/optimizerts/queries/q9_product_type_profit.hive b/hivesterix/src/test/resources/optimizerts/queries/q9_product_type_profit.hive
new file mode 100644
index 0000000..586779c
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/q9_product_type_profit.hive
@@ -0,0 +1,51 @@
+DROP TABLE IF EXISTS part;
+DROP TABLE IF EXISTS lineitem;
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS orders;
+DROP TABLE IF EXISTS partsupp;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS q9_product_type_profit;
+
+-- create the tables and load the data
+create external table part (P_PARTKEY INT, P_NAME STRING, P_MFGR STRING, P_BRAND STRING, P_TYPE STRING, P_SIZE INT, P_CONTAINER STRING, P_RETAILPRICE DOUBLE, P_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/part';
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create external table partsupp (PS_PARTKEY INT, PS_SUPPKEY INT, PS_AVAILQTY INT, PS_SUPPLYCOST DOUBLE, PS_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/partsupp';
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+
+-- create the result table
+create table q9_product_type_profit (nation string, o_year string, sum_profit double);
+
+set mapred.min.split.size=536870912;
+set hive.exec.reducers.bytes.per.reducer=1024000000;
+
+-- the query
+insert overwrite table q9_product_type_profit
+select 
+  nation, o_year, sum(amount) as sum_profit
+from 
+  (
+    select 
+      n_name as nation, year(o_orderdate) as o_year, 
+      l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+    from
+      orders o join
+      (select l_extendedprice, l_discount, l_quantity, l_orderkey, n_name, ps_supplycost 
+       from part p join
+         (select l_extendedprice, l_discount, l_quantity, l_partkey, l_orderkey, 
+                 n_name, ps_supplycost 
+          from partsupp ps join
+            (select l_suppkey, l_extendedprice, l_discount, l_quantity, l_partkey, 
+                    l_orderkey, n_name 
+             from
+               (select s_suppkey, n_name 
+                from nation n join supplier s on n.n_nationkey = s.s_nationkey
+               ) s1 join lineitem l on s1.s_suppkey = l.l_suppkey
+            ) l1 on ps.ps_suppkey = l1.l_suppkey and ps.ps_partkey = l1.l_partkey
+         ) l2 on p.p_name like '%green%' and p.p_partkey = l2.l_partkey
+     ) l3 on o.o_orderkey = l3.l_orderkey
+  ) profit
+group by nation, o_year
+order by nation, o_year desc;
+
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u10_nestedloop_join.hive b/hivesterix/src/test/resources/optimizerts/queries/u10_nestedloop_join.hive
new file mode 100644
index 0000000..ce94ac6
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u10_nestedloop_join.hive
@@ -0,0 +1,21 @@
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS u10_nestedloop_join;
+
+-- create tables and load data
+create external table nation (N_NATIONKEY INT, N_NAME STRING, N_REGIONKEY INT, N_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/nation';
+
+-- create the target table
+create table u10_nestedloop_join(supp_nation string, cust_nation string, s_nationkey int, c_nationkey int);
+
+-- the query
+insert overwrite table u10_nestedloop_join
+select 
+  * 
+from
+  (
+    select 
+      n1.n_name as supp_nation, n2.n_name as cust_nation, n1.n_nationkey as s_nationkey,      
+      n2.n_nationkey as c_nationkey
+    from 
+      nation n1 join nation n2 where n1.n_nationkey > n2.n_nationkey
+  ) a;
\ No newline at end of file
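
Unlike the other join tests, this one has no equality predicate at all: only the theta condition n1.n_nationkey > n2.n_nationkey relates the two branches, so the join cannot be hash-partitioned and should compile to a nested-loop operator (cf. the NESTED_LOOP and BROADCAST_EXCHANGE pair in the q11_important_stock plan below). An equivalent spelling, assuming a Hive version with explicit CROSS JOIN syntax:

select n1.n_name as supp_nation, n2.n_name as cust_nation,
       n1.n_nationkey as s_nationkey, n2.n_nationkey as c_nationkey
from nation n1 cross join nation n2
where n1.n_nationkey > n2.n_nationkey;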
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u1_group_by.hive b/hivesterix/src/test/resources/optimizerts/queries/u1_group_by.hive
new file mode 100644
index 0000000..1d5c312
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u1_group_by.hive
@@ -0,0 +1,12 @@
+drop table IF EXISTS lineitem;
+
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, 
+L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, 
+L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, 
+L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS 
+TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+
+select * from (select sum(abs(L_QUANTITY)), SUM(L_EXTENDEDPRICE*(1-L_DISCOUNT)*(1+L_TAX))
+FROM lineitem WHERE L_SHIPDATE<='1998-09-02'  GROUP BY L_RETURNFLAG) T;
+
+drop table lineitem;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u2_select-project.hive b/hivesterix/src/test/resources/optimizerts/queries/u2_select-project.hive
new file mode 100644
index 0000000..1cf0c36
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u2_select-project.hive
@@ -0,0 +1,7 @@
+drop table IF EXISTS supplier;
+drop table IF EXISTS result;
+
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create table result (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT);
+
+select (2*s_suppkey), s_address, s_nationkey, s_name FROM supplier where S_SUPPKEY*2 < 20;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u3_union.hive b/hivesterix/src/test/resources/optimizerts/queries/u3_union.hive
new file mode 100644
index 0000000..1c84ba8
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u3_union.hive
@@ -0,0 +1,7 @@
+drop table IF EXISTS supplier;
+drop table IF EXISTS result;
+
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create table result (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT);
+
+select * from (select (2*s_suppkey), s_address, s_nationkey, s_name FROM supplier where S_SUPPKEY*2 < 20 union all select (2*s_suppkey), s_address, s_nationkey, s_name FROM supplier where S_SUPPKEY*2 > 50) t;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u4_join.hive b/hivesterix/src/test/resources/optimizerts/queries/u4_join.hive
new file mode 100644
index 0000000..c013fa6
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u4_join.hive
@@ -0,0 +1,14 @@
+drop table IF EXISTS supplier;
+drop table IF EXISTS result;
+
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, 
+S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED
+BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+
+create table result (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT);
+
+insert overwrite table result select s_suppkey, s_name, s_address, s_nationkey
+from supplier where S_SUPPKEY*2 < 20;
+
+select result.s_suppkey, supplier.s_phone, supplier.s_acctbal,
+supplier.s_comment from result join supplier on result.s_suppkey=supplier.s_suppkey;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u5_lateral_view.hive b/hivesterix/src/test/resources/optimizerts/queries/u5_lateral_view.hive
new file mode 100644
index 0000000..2740bca
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u5_lateral_view.hive
@@ -0,0 +1,7 @@
+drop table IF EXISTS supplier;
+drop table IF EXISTS result;
+
+create table supplier (S_SUPPKEY array<int>, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING);
+create table result (S_SUPPKEY int);
+
+select s_name, s_address, col1 from supplier LATERAL VIEW explode(s_suppkey) suppadd as col1;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u6_limit.hive b/hivesterix/src/test/resources/optimizerts/queries/u6_limit.hive
new file mode 100644
index 0000000..b268aff
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u6_limit.hive
@@ -0,0 +1,8 @@
+drop table IF EXISTS orders;
+drop table IF EXISTS result;
+drop table IF EXISTS q_limit2;
+
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create table q_limit2(col1 int, col2 double, col3 string, col4 string);
+
+insert overwrite table q_limit2 select O_ORDERKEY, O_TOTALPRICE, O_ORDERDATE, O_CLERK from orders where O_TOTALPRICE<10000 order by o_totalprice limit 4;
\ No newline at end of file
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u7_multi_join.hive b/hivesterix/src/test/resources/optimizerts/queries/u7_multi_join.hive
new file mode 100644
index 0000000..2891c56
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u7_multi_join.hive
@@ -0,0 +1,9 @@
+drop table IF EXISTS lineitem;
+drop table IF EXISTS orders;
+drop table IF EXISTS customer;
+
+create external table lineitem (L_ORDERKEY INT, L_PARTKEY INT, L_SUPPKEY INT, L_LINENUMBER INT, L_QUANTITY DOUBLE, L_EXTENDEDPRICE DOUBLE, L_DISCOUNT DOUBLE, L_TAX DOUBLE, L_RETURNFLAG STRING, L_LINESTATUS STRING, L_SHIPDATE STRING, L_COMMITDATE STRING, L_RECEIPTDATE STRING, L_SHIPINSTRUCT STRING, L_SHIPMODE STRING, L_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/lineitem';
+create external table orders (O_ORDERKEY INT, O_CUSTKEY INT, O_ORDERSTATUS STRING, O_TOTALPRICE DOUBLE, O_ORDERDATE STRING, O_ORDERPRIORITY STRING, O_CLERK STRING, O_SHIPPRIORITY INT, O_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/orders';
+create external table customer (C_CUSTKEY INT, C_NAME STRING, C_ADDRESS STRING, C_NATIONKEY INT, C_PHONE STRING, C_ACCTBAL DOUBLE, C_MKTSEGMENT STRING, C_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/customer';
+
+select l_linenumber, o_orderkey, o_totalprice, o_orderdate, o_shippriority from customer c join orders o on c.c_custkey = o.o_custkey join lineitem l on o.o_orderkey = l.l_orderkey where c.c_custkey<5 and o.o_totalprice<30000;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u8_non_mapred.hive b/hivesterix/src/test/resources/optimizerts/queries/u8_non_mapred.hive
new file mode 100644
index 0000000..247f2c1
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u8_non_mapred.hive
@@ -0,0 +1,7 @@
+drop table IF EXISTS supplier;
+drop table IF EXISTS result;
+
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create table result (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT);
+
+select * FROM supplier;
diff --git a/hivesterix/src/test/resources/optimizerts/queries/u9_order_by.hive b/hivesterix/src/test/resources/optimizerts/queries/u9_order_by.hive
new file mode 100644
index 0000000..8d5d1cf
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/queries/u9_order_by.hive
@@ -0,0 +1,7 @@
+drop table IF EXISTS supplier;
+drop table IF EXISTS result;
+
+create external table supplier (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT, S_PHONE STRING, S_ACCTBAL DOUBLE, S_COMMENT STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE LOCATION '/tpch/supplier';
+create table result (S_SUPPKEY INT, S_NAME STRING, S_ADDRESS STRING, S_NATIONKEY INT);
+
+insert overwrite table result select s_suppkey, s_name, s_address, s_nationkey FROM supplier where S_SUPPKEY*2 < 20 order by s_name;
diff --git a/hivesterix/src/test/resources/optimizerts/results/h11_share_scan.plan b/hivesterix/src/test/resources/optimizerts/results/h11_share_scan.plan
new file mode 100644
index 0000000..867bfaf
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/h11_share_scan.plan
@@ -0,0 +1,34 @@
+write [%0->$$1, %0->$$2]
+-- SINK_WRITE  |PARTITIONED|
+  select (function-call: algebricks:lt, Args:[%0->$$1, 5])
+  -- STREAM_SELECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      replicate 
+      -- SPLIT  |PARTITIONED|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          data-scan []<-[$$1, $$2] <- default.src
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$1]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$1])
+  -- STREAM_PROJECT  |PARTITIONED|
+    select (function-call: algebricks:gt, Args:[%0->$$1, 10])
+    -- STREAM_SELECT  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        replicate 
+        -- SPLIT  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            data-scan []<-[$$1, $$2] <- default.src
+            -- DATASOURCE_SCAN  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                empty-tuple-source
+                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
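
The single data-scan of default.src feeding a replicate/SPLIT into two SINK_WRITE branches is the shared-scan optimization under test: one scan serves both a "key < 5" selection of (key, value) and a "key > 10" projection of key. A plan of this shape would come from a Hive multi-insert over one source table, along these lines (dest1/dest2 are hypothetical targets; src with columns key, value is inferred from the plan):

FROM src
INSERT OVERWRITE TABLE dest1 SELECT key, value WHERE key < 5
INSERT OVERWRITE TABLE dest2 SELECT key WHERE key > 10;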
diff --git a/hivesterix/src/test/resources/optimizerts/results/h12_select_struct.plan b/hivesterix/src/test/resources/optimizerts/results/h12_select_struct.plan
new file mode 100644
index 0000000..8bbfb61
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/h12_select_struct.plan
@@ -0,0 +1,10 @@
+write [%0->$$2]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    data-scan [$$2]<-[$$1, $$2] <- default.src
+    -- DATASOURCE_SCAN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        empty-tuple-source
+        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
diff --git a/hivesterix/src/test/resources/optimizerts/results/q10_returned_item.plan b/hivesterix/src/test/resources/optimizerts/results/q10_returned_item.plan
new file mode 100644
index 0000000..05b3718
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q10_returned_item.plan
@@ -0,0 +1,102 @@
+write [%0->$$38, %0->$$39, %0->$$45, %0->$$40, %0->$$42, %0->$$43, %0->$$41, %0->$$44]
+-- SINK_WRITE  |UNPARTITIONED|
+  project ([$$38, $$39, $$45, $$40, $$42, $$43, $$41, $$44])
+  -- STREAM_PROJECT  |PARTITIONED|
+    limit 20
+    -- STREAM_LIMIT  |UNPARTITIONED|
+      limit 20
+      -- STREAM_LIMIT  |UNPARTITIONED|
+        exchange 
+        -- SORT_MERGE_EXCHANGE [$$45(DESC) ]  |PARTITIONED|
+          limit 20
+          -- STREAM_LIMIT  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+              order (DESC, %0->$$45) 
+              -- STABLE_SORT [$$45(DESC)]  |LOCAL|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  group by ([$$38 := %0->$$48; $$39 := %0->$$49; $$40 := %0->$$50; $$41 := %0->$$51; $$42 := %0->$$52; $$43 := %0->$$53; $$44 := %0->$$54]) decor ([]) {
+                            aggregate [$$45] <- [function-call: hive:sum(FINAL), Args:[%0->$$47]]
+                            -- AGGREGATE  |LOCAL|
+                              nested tuple source
+                              -- NESTED_TUPLE_SOURCE  |LOCAL|
+                         }
+                  -- EXTERNAL_GROUP_BY[$$48, $$49, $$50, $$51, $$52, $$53, $$54]  |PARTITIONED|
+                    exchange 
+                    -- HASH_PARTITION_EXCHANGE [$$48, $$49, $$50, $$51, $$52, $$53, $$54]  |PARTITIONED|
+                      group by ([$$48 := %0->$$21; $$49 := %0->$$22; $$50 := %0->$$26; $$51 := %0->$$25; $$52 := %0->$$18; $$53 := %0->$$23; $$54 := %0->$$28]) decor ([]) {
+                                aggregate [$$47] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$7]]]]
+                                -- AGGREGATE  |LOCAL|
+                                  nested tuple source
+                                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+                             }
+                      -- EXTERNAL_GROUP_BY[$$21, $$22, $$26, $$25, $$18, $$23, $$28]  |LOCAL|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          project ([$$21, $$22, $$23, $$25, $$26, $$28, $$18, $$6, $$7])
+                          -- STREAM_PROJECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$29])
+                              -- HYBRID_HASH_JOIN [$$1][$$29]  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                                  project ([$$1, $$6, $$7])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    select (function-call: algebricks:eq, Args:[%0->$$9, R])
+                                    -- STREAM_SELECT  |PARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        data-scan [$$1, $$6, $$7, $$9]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            empty-tuple-source
+                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$29]  |PARTITIONED|
+                                  project ([$$29, $$21, $$22, $$23, $$25, $$26, $$28, $$18])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      join (function-call: algebricks:eq, Args:[%0->$$17, %0->$$24])
+                                      -- HYBRID_HASH_JOIN [$$17][$$24]  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$17]  |PARTITIONED|
+                                          data-scan [$$17, $$18]<-[$$17, $$18, $$19, $$20] <- default.nation
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$24]  |PARTITIONED|
+                                          project ([$$24, $$21, $$22, $$23, $$25, $$26, $$28, $$29])
+                                          -- STREAM_PROJECT  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              join (function-call: algebricks:eq, Args:[%0->$$30, %0->$$21])
+                                              -- HYBRID_HASH_JOIN [$$30][$$21]  |PARTITIONED|
+                                                exchange 
+                                                -- HASH_PARTITION_EXCHANGE [$$30]  |PARTITIONED|
+                                                  project ([$$30, $$29])
+                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                    select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$33, 1994-01-01], function-call: algebricks:ge, Args:[%0->$$33, 1993-10-01], function-call: algebricks:lt, Args:[%0->$$33, 1994-01-01]])
+                                                    -- STREAM_SELECT  |PARTITIONED|
+                                                      exchange 
+                                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                        data-scan [$$33, $$29, $$30]<-[$$29, $$30, $$31, $$32, $$33, $$34, $$35, $$36, $$37] <- default.orders
+                                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                                          exchange 
+                                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                            empty-tuple-source
+                                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                exchange 
+                                                -- HASH_PARTITION_EXCHANGE [$$21]  |PARTITIONED|
+                                                  data-scan [$$21, $$22, $$23, $$24, $$25, $$26, $$28]<-[$$21, $$22, $$23, $$24, $$25, $$26, $$27, $$28] <- default.customer
+                                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                                    exchange 
+                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                      empty-tuple-source
+                                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
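
A pattern worth noting in this and the following plans: each aggregation is split into two EXTERNAL_GROUP_BY levels, a LOCAL one computing hive:sum(PARTIAL1) partial aggregates before the HASH_PARTITION_EXCHANGE and a PARTITIONED one combining them with hive:sum(FINAL) after it, the Algebricks analogue of a map-side combiner. The decomposition relies on sum being distributive; schematically, against a hypothetical intermediate table:

-- local pre-aggregation (the PARTIAL1 step), per partition:
create table partial_sums as
select l_orderkey, sum(l_extendedprice) as part_sum
from lineitem group by l_orderkey;

-- global combine (the FINAL step), after repartitioning on the keys:
select l_orderkey, sum(part_sum)
from partial_sums group by l_orderkey;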
diff --git a/hivesterix/src/test/resources/optimizerts/results/q11_important_stock.plan b/hivesterix/src/test/resources/optimizerts/results/q11_important_stock.plan
new file mode 100644
index 0000000..70ad7ee
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q11_important_stock.plan
@@ -0,0 +1,126 @@
+write [%0->$$17, %0->$$18]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$17, $$18])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      group by ([$$17 := %0->$$21]) decor ([]) {
+                aggregate [$$18] <- [function-call: hive:sum(FINAL), Args:[%0->$$20]]
+                -- AGGREGATE  |LOCAL|
+                  nested tuple source
+                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+             }
+      -- EXTERNAL_GROUP_BY[$$21]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$21]  |PARTITIONED|
+          group by ([$$21 := %0->$$1]) decor ([]) {
+                    aggregate [$$20] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$4, %0->$$3]]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$1]  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              project ([$$1, $$3, $$4])
+              -- STREAM_PROJECT  |PARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  join (function-call: algebricks:eq, Args:[%0->$$2, %0->$$6])
+                  -- HYBRID_HASH_JOIN [$$2][$$6]  |PARTITIONED|
+                    exchange 
+                    -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                      data-scan [$$2, $$1, $$3, $$4]<-[$$1, $$2, $$3, $$4, $$5] <- default.partsupp
+                      -- DATASOURCE_SCAN  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          empty-tuple-source
+                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                    exchange 
+                    -- HASH_PARTITION_EXCHANGE [$$6]  |PARTITIONED|
+                      project ([$$6])
+                      -- STREAM_PROJECT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          join (function-call: algebricks:eq, Args:[%0->$$9, %0->$$13])
+                          -- HYBRID_HASH_JOIN [$$9][$$13]  |PARTITIONED|
+                            exchange 
+                            -- HASH_PARTITION_EXCHANGE [$$9]  |PARTITIONED|
+                              data-scan [$$9, $$6]<-[$$6, $$7, $$8, $$9, $$10, $$11, $$12] <- default.supplier
+                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  empty-tuple-source
+                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                            exchange 
+                            -- HASH_PARTITION_EXCHANGE [$$13]  |PARTITIONED|
+                              project ([$$13])
+                              -- STREAM_PROJECT  |PARTITIONED|
+                                select (function-call: algebricks:eq, Args:[%0->$$14, GERMANY])
+                                -- STREAM_SELECT  |PARTITIONED|
+                                  exchange 
+                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                    data-scan [$$13, $$14]<-[$$13, $$14, $$15, $$16] <- default.nation
+                                    -- DATASOURCE_SCAN  |PARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        empty-tuple-source
+                                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$3]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    group by ([]) decor ([]) {
+              aggregate [$$3] <- [function-call: hive:sum(FINAL), Args:[%0->$$5]]
+              -- AGGREGATE  |LOCAL|
+                nested tuple source
+                -- NESTED_TUPLE_SOURCE  |LOCAL|
+           }
+    -- EXTERNAL_GROUP_BY[]  |PARTITIONED|
+      exchange 
+      -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$5] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$2]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            data-scan [$$2]<-[$$1, $$2] <- default.q11_part_tmp
+            -- DATASOURCE_SCAN  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                empty-tuple-source
+                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$2, %0->$$3]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- SORT_MERGE_EXCHANGE [$$3(DESC) ]  |PARTITIONED|
+    order (DESC, %0->$$3) 
+    -- STABLE_SORT [$$3(DESC)]  |LOCAL|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        project ([$$2, $$3])
+        -- STREAM_PROJECT  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            join (function-call: algebricks:and, Args:[function-call: algebricks:gt, Args:[%0->$$3, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$1, 1.0E-4]], true])
+            -- NESTED_LOOP  |PARTITIONED|
+              exchange 
+              -- BROADCAST_EXCHANGE  |PARTITIONED|
+                data-scan []<-[$$1] <- default.q11_sum_tmp
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                data-scan [$$2, $$3]<-[$$2, $$3] <- default.q11_part_tmp
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
diff --git a/hivesterix/src/test/resources/optimizerts/results/q12_shipping.plan b/hivesterix/src/test/resources/optimizerts/results/q12_shipping.plan
new file mode 100644
index 0000000..5c240e2
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q12_shipping.plan
@@ -0,0 +1,58 @@
+write [%0->$$26, %0->$$29, %0->$$30]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$26, $$29, $$30])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$29, $$30] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToDouble, Args:[%0->$$27], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToDouble, Args:[%0->$$28]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$26(ASC) ]  |PARTITIONED|
+        order (ASC, %0->$$26) 
+        -- STABLE_SORT [$$26(ASC)]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            group by ([$$26 := %0->$$34]) decor ([]) {
+                      aggregate [$$27, $$28] <- [function-call: hive:sum(FINAL), Args:[%0->$$32], function-call: hive:sum(FINAL), Args:[%0->$$33]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$34]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$34]  |PARTITIONED|
+                group by ([$$34 := %0->$$24]) decor ([]) {
+                          aggregate [$$32, $$33] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen, Args:[function-call: algebricks:or, Args:[function-call: algebricks:eq, Args:[%0->$$6, 1-URGENT], function-call: algebricks:eq, Args:[%0->$$6, 2-HIGH]], 1, 0]], function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen, Args:[function-call: algebricks:and, Args:[function-call: algebricks:neq, Args:[%0->$$6, 1-URGENT], function-call: algebricks:neq, Args:[%0->$$6, 2-HIGH]], 1, 0]]]
+                          -- AGGREGATE  |LOCAL|
+                            nested tuple source
+                            -- NESTED_TUPLE_SOURCE  |LOCAL|
+                       }
+                -- EXTERNAL_GROUP_BY[$$24]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    project ([$$6, $$24])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        join (function-call: algebricks:eq, Args:[%0->$$10, %0->$$1])
+                        -- HYBRID_HASH_JOIN [$$10][$$1]  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$10]  |PARTITIONED|
+                            project ([$$10, $$24])
+                            -- STREAM_PROJECT  |PARTITIONED|
+                              select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$22, 1995-01-01], function-call: algebricks:ge, Args:[%0->$$22, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$20, %0->$$21], function-call: algebricks:lt, Args:[%0->$$21, %0->$$22], function-call: algebricks:lt, Args:[%0->$$20, %0->$$21], function-call: algebricks:ge, Args:[%0->$$22, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$22, 1995-01-01], function-call: algebricks:or, Args:[function-call: algebricks:eq, Args:[%0->$$24, MAIL], function-call: algebricks:eq, Args:[%0->$$24, SHIP]], function-call: algebricks:or, Args:[function-call: algebricks:eq, Args:[%0->$$24, MAIL], function-call: algebricks:eq, Args:[%0->$$24, SHIP]]])
+                              -- STREAM_SELECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  data-scan [$$21, $$20, $$22, $$24, $$10]<-[$$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25] <- default.lineitem
+                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      empty-tuple-source
+                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                            data-scan [$$1, $$6]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.orders
+                            -- DATASOURCE_SCAN  |PARTITIONED|
+                              exchange 
+                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                empty-tuple-source
+                                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
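Note: the plan above is evidently TPC-H Q12 (shipping modes and order priority). A HiveQL sketch that would plausibly compile to it follows; it is not the verbatim test query, and the column names are inferred from the positional $$ variables via the standard TPC-H schema ($$24 is l_shipmode, $$6 is o_orderpriority). The PARTIAL1/FINAL aggregate pairs correspond to a local pre-aggregation followed by a hash-partitioned final group-by, and the UDFToDouble ASSIGN on top casts the two sums for output.

    -- sketch of the assumed source query (TPC-H Q12)
    select l_shipmode,
           sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH'
                    then 1 else 0 end) as high_line_count,
           sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH'
                    then 1 else 0 end) as low_line_count
    from orders o join lineitem l on o.o_orderkey = l.l_orderkey
    where (l.l_shipmode = 'MAIL' or l.l_shipmode = 'SHIP')
      and l.l_shipdate < l.l_commitdate
      and l.l_commitdate < l.l_receiptdate
      and l.l_receiptdate >= '1994-01-01' and l.l_receiptdate < '1995-01-01'
    group by l_shipmode
    order by l_shipmode;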
diff --git a/hivesterix/src/test/resources/optimizerts/results/q13_customer_distribution.plan b/hivesterix/src/test/resources/optimizerts/results/q13_customer_distribution.plan
new file mode 100644
index 0000000..19bcd24
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q13_customer_distribution.plan
@@ -0,0 +1,80 @@
+write [%0->$$22, %0->$$23]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$22, $$23])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$22, $$23] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$20], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$21]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$21(DESC), $$20(DESC) ]  |PARTITIONED|
+        order (DESC, %0->$$21) (DESC, %0->$$20) 
+        -- STABLE_SORT [$$21(DESC), $$20(DESC)]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            group by ([$$20 := %0->$$28]) decor ([]) {
+                      aggregate [$$21] <- [function-call: hive:count(FINAL), Args:[%0->$$27]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$28]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$28]  |PARTITIONED|
+                group by ([$$28 := %0->$$19]) decor ([]) {
+                          aggregate [$$27] <- [function-call: hive:count(PARTIAL1), Args:[1]]
+                          -- AGGREGATE  |LOCAL|
+                            nested tuple source
+                            -- NESTED_TUPLE_SOURCE  |LOCAL|
+                       }
+                -- EXTERNAL_GROUP_BY[$$19]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    project ([$$19])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        group by ([$$18 := %0->$$26]) decor ([]) {
+                                  aggregate [$$19] <- [function-call: hive:count(FINAL), Args:[%0->$$25]]
+                                  -- AGGREGATE  |LOCAL|
+                                    nested tuple source
+                                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+                               }
+                        -- EXTERNAL_GROUP_BY[$$26]  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$26]  |PARTITIONED|
+                            group by ([$$26 := %0->$$10]) decor ([]) {
+                                      aggregate [$$25] <- [function-call: hive:count(PARTIAL1), Args:[%0->$$1]]
+                                      -- AGGREGATE  |LOCAL|
+                                        nested tuple source
+                                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                                   }
+                            -- EXTERNAL_GROUP_BY[$$10]  |LOCAL|
+                              exchange 
+                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                project ([$$10, $$1])
+                                -- STREAM_PROJECT  |PARTITIONED|
+                                  exchange 
+                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                    left outer join (function-call: algebricks:eq, Args:[%0->$$2, %0->$$10])
+                                    -- HYBRID_HASH_JOIN [$$10][$$2]  |PARTITIONED|
+                                      exchange 
+                                      -- HASH_PARTITION_EXCHANGE [$$10]  |PARTITIONED|
+                                        data-scan [$$10]<-[$$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17] <- default.customer
+                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            empty-tuple-source
+                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                      exchange 
+                                      -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                                        project ([$$2, $$1])
+                                        -- STREAM_PROJECT  |PARTITIONED|
+                                          select (function-call: algebricks:not, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$9, %special%requests%]])
+                                          -- STREAM_SELECT  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              data-scan [$$1, $$2, $$9]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.orders
+                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                exchange 
+                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                  empty-tuple-source
+                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
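Note: the plan above corresponds to TPC-H Q13 (customer order-count distribution). A HiveQL sketch with names inferred from the standard TPC-H schema, not taken from the test query itself:

    -- sketch of the assumed source query (TPC-H Q13)
    select c_count, count(1) as custdist
    from (select c_custkey, count(o_orderkey) as c_count
          from customer c
          left outer join orders o
            on c.c_custkey = o.o_custkey
           and not (o.o_comment like '%special%requests%')
          group by c_custkey) c_orders
    group by c_count
    order by custdist desc, c_count desc;

The two cascaded EXTERNAL_GROUP_BY pairs are the inner per-customer count and the outer count per c_count value; the `not like` predicate appears as a STREAM_SELECT directly above the orders scan, below the LEFT OUTER JOIN.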
diff --git a/hivesterix/src/test/resources/optimizerts/results/q14_promotion_effect.plan b/hivesterix/src/test/resources/optimizerts/results/q14_promotion_effect.plan
new file mode 100644
index 0000000..21b90bd
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q14_promotion_effect.plan
@@ -0,0 +1,54 @@
+write [%0->$$28]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$28])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$28] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPDivide, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[100.0, %0->$$26], %0->$$27]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$26, $$27] <- [function-call: hive:sum(FINAL), Args:[%0->$$30], function-call: hive:sum(FINAL), Args:[%0->$$31]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[]  |PARTITIONED|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+            group by ([]) decor ([]) {
+                      aggregate [$$30, $$31] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$5, PROMO%], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$15, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$16]], 0.0]], function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$15, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$16]]]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$5, $$15, $$16])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    join (function-call: algebricks:eq, Args:[%0->$$11, %0->$$1])
+                    -- HYBRID_HASH_JOIN [$$11][$$1]  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$11]  |PARTITIONED|
+                        project ([$$11, $$15, $$16])
+                        -- STREAM_PROJECT  |PARTITIONED|
+                          select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$20, 1995-10-01], function-call: algebricks:ge, Args:[%0->$$20, 1995-09-01], function-call: algebricks:lt, Args:[%0->$$20, 1995-10-01]])
+                          -- STREAM_SELECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              data-scan [$$16, $$20, $$11, $$15]<-[$$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25] <- default.lineitem
+                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  empty-tuple-source
+                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                        data-scan [$$1, $$5]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.part
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
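Note: the plan above is TPC-H Q14 (promotion effect). A sketch with inferred names; the plan's top ASSIGN performs the 100 * sum(...) / sum(...) arithmetic only after both sums have been finalized by the empty-key group-by:

    -- sketch of the assumed source query (TPC-H Q14)
    select 100.0 * sum(case when p_type like 'PROMO%'
                            then l_extendedprice * (1 - l_discount)
                            else 0.0 end)
           / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
    from lineitem l join part p on l.l_partkey = p.p_partkey
    where l.l_shipdate >= '1995-09-01' and l.l_shipdate < '1995-10-01';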
diff --git a/hivesterix/src/test/resources/optimizerts/results/q15_top_supplier.plan b/hivesterix/src/test/resources/optimizerts/results/q15_top_supplier.plan
new file mode 100644
index 0000000..a5bd27a
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q15_top_supplier.plan
@@ -0,0 +1,110 @@
+write [%0->$$17, %0->$$18]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$17, $$18])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      group by ([$$17 := %0->$$21]) decor ([]) {
+                aggregate [$$18] <- [function-call: hive:sum(FINAL), Args:[%0->$$20]]
+                -- AGGREGATE  |LOCAL|
+                  nested tuple source
+                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+             }
+      -- EXTERNAL_GROUP_BY[$$21]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$21]  |PARTITIONED|
+          group by ([$$21 := %0->$$3]) decor ([]) {
+                    aggregate [$$20] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$7]]]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$3]  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              project ([$$3, $$6, $$7])
+              -- STREAM_PROJECT  |PARTITIONED|
+                select (function-call: algebricks:and, Args:[function-call: algebricks:ge, Args:[%0->$$11, 1996-01-01], function-call: algebricks:lt, Args:[%0->$$11, 1996-04-01], function-call: algebricks:ge, Args:[%0->$$11, 1996-01-01], function-call: algebricks:lt, Args:[%0->$$11, 1996-04-01]])
+                -- STREAM_SELECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    data-scan [$$3, $$6, $$7, $$11]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                    -- DATASOURCE_SCAN  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        empty-tuple-source
+                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$3]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    group by ([]) decor ([]) {
+              aggregate [$$3] <- [function-call: hive:max(FINAL), Args:[%0->$$5]]
+              -- AGGREGATE  |LOCAL|
+                nested tuple source
+                -- NESTED_TUPLE_SOURCE  |LOCAL|
+           }
+    -- PRE_CLUSTERED_GROUP_BY[]  |PARTITIONED|
+      exchange 
+      -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$5] <- [function-call: hive:max(PARTIAL1), Args:[%0->$$2]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- PRE_CLUSTERED_GROUP_BY[]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            data-scan [$$2]<-[$$1, $$2] <- default.revenue
+            -- DATASOURCE_SCAN  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                empty-tuple-source
+                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$2, %0->$$3, %0->$$4, %0->$$6, %0->$$10]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- SORT_MERGE_EXCHANGE [$$2(ASC) ]  |PARTITIONED|
+    order (ASC, %0->$$2) 
+    -- STABLE_SORT [$$2(ASC)]  |LOCAL|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        project ([$$2, $$3, $$4, $$6, $$10])
+        -- STREAM_PROJECT  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$10])
+            -- HYBRID_HASH_JOIN [$$1][$$10]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                data-scan []<-[$$1] <- default.max_revenue
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$10]  |PARTITIONED|
+                project ([$$10, $$2, $$3, $$4, $$6])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    join (function-call: algebricks:eq, Args:[%0->$$9, %0->$$2])
+                    -- HYBRID_HASH_JOIN [$$9][$$2]  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$9]  |PARTITIONED|
+                        data-scan []<-[$$9, $$10] <- default.revenue
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                        data-scan [$$2, $$3, $$4, $$6]<-[$$2, $$3, $$4, $$5, $$6, $$7, $$8] <- default.supplier
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
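Note: the three plans above together implement TPC-H Q15 (top supplier): one builds the revenue table, one computes its maximum into max_revenue, and one joins both back against supplier. A sketch follows; the temp-table column names (supplier_no, total_revenue, max_revenue) are guesses, since the plan records only positional variables:

    -- sketch of the assumed three-statement script (TPC-H Q15)
    insert overwrite table revenue
    select l_suppkey as supplier_no,
           sum(l_extendedprice * (1 - l_discount)) as total_revenue
    from lineitem
    where l_shipdate >= '1996-01-01' and l_shipdate < '1996-04-01'
    group by l_suppkey;

    insert overwrite table max_revenue
    select max(total_revenue) as max_revenue from revenue;

    select s_suppkey, s_name, s_address, s_phone, total_revenue
    from supplier s
    join revenue r on s.s_suppkey = r.supplier_no
    join max_revenue m on r.total_revenue = m.max_revenue
    order by s_suppkey;

Note that the max aggregate in the second plan runs under PRE_CLUSTERED_GROUP_BY, unlike the EXTERNAL_GROUP_BY used for the sums in the first.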
diff --git a/hivesterix/src/test/resources/optimizerts/results/q16_parts_supplier_relationship.plan b/hivesterix/src/test/resources/optimizerts/results/q16_parts_supplier_relationship.plan
new file mode 100644
index 0000000..9835346
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q16_parts_supplier_relationship.plan
@@ -0,0 +1,98 @@
+write [%0->$$1]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$1])
+  -- STREAM_PROJECT  |PARTITIONED|
+    select (function-call: algebricks:not, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$7, %Customer%Complaints%]])
+    -- STREAM_SELECT  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        data-scan [$$1, $$7]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7] <- default.supplier
+        -- DATASOURCE_SCAN  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            empty-tuple-source
+            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$5, %0->$$6, %0->$$7, %0->$$12]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$5, $$6, $$7, $$12])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$12])
+      -- HYBRID_HASH_JOIN [$$1][$$12]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+          data-scan []<-[$$1] <- default.supplier_tmp
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$12]  |PARTITIONED|
+          project ([$$12, $$5, $$6, $$7])
+          -- STREAM_PROJECT  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              join (function-call: algebricks:eq, Args:[%0->$$2, %0->$$11])
+              -- HYBRID_HASH_JOIN [$$2][$$11]  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                  select (function-call: algebricks:and, Args:[function-call: algebricks:not, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$6, MEDIUM POLISHED%]], function-call: algebricks:neq, Args:[%0->$$5, Brand#45], function-call: algebricks:not, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$6, MEDIUM POLISHED%]]])
+                  -- STREAM_SELECT  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      data-scan [$$2, $$5, $$6, $$7]<-[$$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10] <- default.part
+                      -- DATASOURCE_SCAN  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          empty-tuple-source
+                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$11]  |PARTITIONED|
+                  data-scan [$$11, $$12]<-[$$11, $$12, $$13, $$14, $$15] <- default.partsupp
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$10, %0->$$11, %0->$$12, %0->$$14]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$10, $$11, $$12, $$14])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$14] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$13]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$13(DESC), $$10(ASC), $$11(ASC), $$12(ASC) ]  |PARTITIONED|
+        order (DESC, %0->$$13) (ASC, %0->$$10) (ASC, %0->$$11) (ASC, %0->$$12) 
+        -- STABLE_SORT [$$13(DESC), $$10(ASC), $$11(ASC), $$12(ASC)]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            group by ([$$10 := %0->$$5; $$11 := %0->$$6; $$12 := %0->$$7]) decor ([]) {
+                      aggregate [$$13] <- [function-call: hive:count(COMPLETE), Args:[%0->$$8]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$5, $$6, $$7]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$5, $$6, $$7]  |PARTITIONED|
+                group by ([$$5 := %0->$$1; $$6 := %0->$$2; $$7 := %0->$$3; $$8 := %0->$$4]) decor ([]) {
+                          aggregate [] <- []
+                          -- AGGREGATE  |LOCAL|
+                            nested tuple source
+                            -- NESTED_TUPLE_SOURCE  |LOCAL|
+                       }
+                -- EXTERNAL_GROUP_BY[$$1, $$2, $$3, $$4]  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    select (function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:eq, Args:[%0->$$3, 49], function-call: algebricks:eq, Args:[%0->$$3, 14]], function-call: algebricks:eq, Args:[%0->$$3, 23]], function-call: algebricks:eq, Args:[%0->$$3, 45]], function-call: algebricks:eq, Args:[%0->$$3, 19]], function-call: algebricks:eq, Args:[%0->$$3, 3]], function-call: algebricks:eq, Args:[%0->$$3, 36]], function-call: algebricks:eq, Args:[%0->$$3, 9]])
+                    -- STREAM_SELECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        data-scan []<-[$$1, $$2, $$3, $$4] <- default.q16_tmp
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
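Note: the three plans above implement TPC-H Q16 (parts/supplier relationship). A sketch with inferred names (the temp-table column names are guesses):

    -- sketch of the assumed three-statement script (TPC-H Q16)
    insert overwrite table supplier_tmp
    select s_suppkey from supplier
    where not (s_comment like '%Customer%Complaints%');

    insert overwrite table q16_tmp
    select p_brand, p_type, p_size, ps_suppkey
    from part p
    join partsupp ps on p.p_partkey = ps.ps_partkey
    join supplier_tmp st on ps.ps_suppkey = st.s_suppkey
    where p.p_brand <> 'Brand#45'
      and not (p.p_type like 'MEDIUM POLISHED%');

    select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt
    from q16_tmp
    where p_size in (49, 14, 23, 45, 19, 3, 36, 9)
    group by p_brand, p_type, p_size
    order by supplier_cnt desc, p_brand, p_type, p_size;

In the third plan the count(distinct) shows up as a group-by over all four columns with an empty aggregate (the deduplication step) followed by count(COMPLETE), and the IN list is expanded into the chain of OR'ed equalities visible in the STREAM_SELECT.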
diff --git a/hivesterix/src/test/resources/optimizerts/results/q17_small_quantity_order_revenue.plan b/hivesterix/src/test/resources/optimizerts/results/q17_small_quantity_order_revenue.plan
new file mode 100644
index 0000000..a827007
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q17_small_quantity_order_revenue.plan
@@ -0,0 +1,104 @@
+write [%0->$$17, %0->$$19]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$17, $$19])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$19] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[0.2, %0->$$18]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        group by ([$$17 := %0->$$22]) decor ([]) {
+                  aggregate [$$18] <- [function-call: hive:avg(FINAL), Args:[%0->$$21]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[$$22]  |PARTITIONED|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE [$$22]  |PARTITIONED|
+            group by ([$$22 := %0->$$2]) decor ([]) {
+                      aggregate [$$21] <- [function-call: hive:avg(PARTIAL1), Args:[%0->$$5]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$2]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                data-scan [$$2, $$5]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$29]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$29])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$29] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPDivide, Args:[%0->$$28, 7.0]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$28] <- [function-call: hive:sum(FINAL), Args:[%0->$$31]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[]  |PARTITIONED|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+            group by ([]) decor ([]) {
+                      aggregate [$$31] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$17]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$17])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  select (function-call: algebricks:lt, Args:[%0->$$16, %0->$$2])
+                  -- STREAM_SELECT  |UNPARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      join (function-call: algebricks:eq, Args:[%0->$$13, %0->$$1])
+                      -- HYBRID_HASH_JOIN [$$13][$$1]  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          project ([$$13, $$16, $$17])
+                          -- STREAM_PROJECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              join (function-call: algebricks:eq, Args:[%0->$$13, %0->$$3])
+                              -- HYBRID_HASH_JOIN [$$13][$$3]  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$13]  |PARTITIONED|
+                                  data-scan [$$13, $$16, $$17]<-[$$12, $$13, $$14, $$15, $$16, $$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25, $$26, $$27] <- default.lineitem
+                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      empty-tuple-source
+                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$3]  |PARTITIONED|
+                                  project ([$$3])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    select (function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$9, MED BOX], function-call: algebricks:eq, Args:[%0->$$6, Brand#23], function-call: algebricks:eq, Args:[%0->$$9, MED BOX]])
+                                    -- STREAM_SELECT  |PARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        data-scan [$$3, $$6, $$9]<-[$$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11] <- default.part
+                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            empty-tuple-source
+                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                        exchange 
+                        -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                          data-scan [$$1, $$2]<-[$$1, $$2] <- default.lineitem_tmp
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
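Note: the two plans above implement TPC-H Q17 (small-quantity-order revenue). A sketch with inferred names; the lineitem_tmp column names are guesses:

    -- sketch of the assumed two-statement script (TPC-H Q17)
    insert overwrite table lineitem_tmp
    select l_partkey as t_partkey, 0.2 * avg(l_quantity) as t_avg_quantity
    from lineitem
    group by l_partkey;

    select sum(l_extendedprice) / 7.0 as avg_yearly
    from lineitem l
    join part p on l.l_partkey = p.p_partkey
    join lineitem_tmp t on l.l_partkey = t.t_partkey
    where p.p_brand = 'Brand#23'
      and p.p_container = 'MED BOX'
      and l.l_quantity < t.t_avg_quantity;

The l_quantity < t_avg_quantity comparison is not an equality, so it cannot serve as a hash-join key and survives as the STREAM_SELECT above the outer HYBRID_HASH_JOIN.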
diff --git a/hivesterix/src/test/resources/optimizerts/results/q18_large_volume_customer.plan b/hivesterix/src/test/resources/optimizerts/results/q18_large_volume_customer.plan
new file mode 100644
index 0000000..ea47ea0
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q18_large_volume_customer.plan
@@ -0,0 +1,126 @@
+write [%0->$$17, %0->$$18]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$17, $$18])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      group by ([$$17 := %0->$$21]) decor ([]) {
+                aggregate [$$18] <- [function-call: hive:sum(FINAL), Args:[%0->$$20]]
+                -- AGGREGATE  |LOCAL|
+                  nested tuple source
+                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+             }
+      -- EXTERNAL_GROUP_BY[$$21]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$21]  |PARTITIONED|
+          group by ([$$21 := %0->$$1]) decor ([]) {
+                    aggregate [$$20] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$5]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$1]  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              data-scan [$$1, $$5]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+              -- DATASOURCE_SCAN  |PARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  empty-tuple-source
+                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$36, %0->$$37, %0->$$38, %0->$$39, %0->$$40, %0->$$41]
+-- SINK_WRITE  |UNPARTITIONED|
+  project ([$$36, $$37, $$38, $$39, $$40, $$41])
+  -- STREAM_PROJECT  |PARTITIONED|
+    limit 100
+    -- STREAM_LIMIT  |UNPARTITIONED|
+      limit 100
+      -- STREAM_LIMIT  |UNPARTITIONED|
+        exchange 
+        -- SORT_MERGE_EXCHANGE [$$40(DESC), $$39(ASC) ]  |PARTITIONED|
+          limit 100
+          -- STREAM_LIMIT  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+              order (DESC, %0->$$40) (ASC, %0->$$39) 
+              -- STABLE_SORT [$$40(DESC), $$39(ASC)]  |LOCAL|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  group by ([$$36 := %0->$$44; $$37 := %0->$$45; $$38 := %0->$$46; $$39 := %0->$$47; $$40 := %0->$$48]) decor ([]) {
+                            aggregate [$$41] <- [function-call: hive:sum(FINAL), Args:[%0->$$43]]
+                            -- AGGREGATE  |LOCAL|
+                              nested tuple source
+                              -- NESTED_TUPLE_SOURCE  |LOCAL|
+                         }
+                  -- EXTERNAL_GROUP_BY[$$44, $$45, $$46, $$47, $$48]  |PARTITIONED|
+                    exchange 
+                    -- HASH_PARTITION_EXCHANGE [$$44, $$45, $$46, $$47, $$48]  |PARTITIONED|
+                      group by ([$$44 := %0->$$20; $$45 := %0->$$19; $$46 := %0->$$27; $$47 := %0->$$31; $$48 := %0->$$30]) decor ([]) {
+                                aggregate [$$43] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$7]]
+                                -- AGGREGATE  |LOCAL|
+                                  nested tuple source
+                                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+                             }
+                      -- EXTERNAL_GROUP_BY[$$20, $$19, $$27, $$31, $$30]  |LOCAL|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          project ([$$19, $$20, $$27, $$30, $$31, $$7])
+                          -- STREAM_PROJECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              join (function-call: algebricks:eq, Args:[%0->$$27, %0->$$1])
+                              -- HYBRID_HASH_JOIN [$$1][$$27]  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                                  project ([$$1, $$7])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      join (function-call: algebricks:eq, Args:[%0->$$3, %0->$$1])
+                                      -- HYBRID_HASH_JOIN [$$3][$$1]  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$3]  |PARTITIONED|
+                                          data-scan [$$3, $$7]<-[$$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17, $$18] <- default.lineitem
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                                          project ([$$1])
+                                          -- STREAM_PROJECT  |PARTITIONED|
+                                            select (function-call: algebricks:gt, Args:[%0->$$2, 300])
+                                            -- STREAM_SELECT  |PARTITIONED|
+                                              exchange 
+                                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                data-scan []<-[$$1, $$2] <- default.q18_tmp
+                                                -- DATASOURCE_SCAN  |PARTITIONED|
+                                                  exchange 
+                                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                    empty-tuple-source
+                                                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$27]  |PARTITIONED|
+                                  project ([$$27, $$19, $$20, $$30, $$31])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      join (function-call: algebricks:eq, Args:[%0->$$28, %0->$$19])
+                                      -- HYBRID_HASH_JOIN [$$28][$$19]  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$28]  |PARTITIONED|
+                                          data-scan [$$28, $$27, $$30, $$31]<-[$$27, $$28, $$29, $$30, $$31, $$32, $$33, $$34, $$35] <- default.orders
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$19]  |PARTITIONED|
+                                          data-scan [$$19, $$20]<-[$$19, $$20, $$21, $$22, $$23, $$24, $$25, $$26] <- default.customer
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
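Note: the two plans above implement TPC-H Q18 (large volume customer). A sketch with inferred names (the q18_tmp column names are guesses):

    -- sketch of the assumed two-statement script (TPC-H Q18)
    insert overwrite table q18_tmp
    select l_orderkey, sum(l_quantity) as t_sum_quantity
    from lineitem
    group by l_orderkey;

    select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice,
           sum(l_quantity)
    from customer c
    join orders o on c.c_custkey = o.o_custkey
    join lineitem l on o.o_orderkey = l.l_orderkey
    join q18_tmp t on l.l_orderkey = t.l_orderkey
    where t.t_sum_quantity > 300
    group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice
    order by o_totalprice desc, o_orderdate
    limit 100;

Note the three STREAM_LIMIT operators in the plan: one local one below the SORT_MERGE_EXCHANGE, which caps each partition at 100 rows before the merge, and two global ones above it.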
diff --git a/hivesterix/src/test/resources/optimizerts/results/q19_discounted_revenue.plan b/hivesterix/src/test/resources/optimizerts/results/q19_discounted_revenue.plan
new file mode 100644
index 0000000..1827729
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q19_discounted_revenue.plan
@@ -0,0 +1,46 @@
+write [%0->$$26]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    group by ([]) decor ([]) {
+              aggregate [$$26] <- [function-call: hive:sum(FINAL), Args:[%0->$$28]]
+              -- AGGREGATE  |LOCAL|
+                nested tuple source
+                -- NESTED_TUPLE_SOURCE  |LOCAL|
+           }
+    -- EXTERNAL_GROUP_BY[]  |PARTITIONED|
+      exchange 
+      -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$28] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$15, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$16]]]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            project ([$$15, $$16])
+            -- STREAM_PROJECT  |PARTITIONED|
+              select (function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$4, Brand#12], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFRegExp, Args:[%0->$$7, SM CASE||SM BOX||SM PACK||SM PKG]], function-call: algebricks:ge, Args:[%0->$$14, 1]], function-call: algebricks:le, Args:[%0->$$14, 11]], function-call: algebricks:ge, Args:[%0->$$6, 1]], function-call: algebricks:le, Args:[%0->$$6, 5]], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFRegExp, Args:[%0->$$24, AIR||AIR REG]], function-call: algebricks:eq, Args:[%0->$$23, DELIVER IN PERSON]], function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$4, Brand#23], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFRegExp, Args:[%0->$$7, MED BAG||MED BOX||MED PKG||MED PACK]], function-call: algebricks:ge, Args:[%0->$$14, 10]], function-call: algebricks:le, Args:[%0->$$14, 20]], function-call: algebricks:ge, Args:[%0->$$6, 1]], function-call: algebricks:le, Args:[%0->$$6, 10]], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFRegExp, Args:[%0->$$24, AIR||AIR REG]], function-call: algebricks:eq, Args:[%0->$$23, DELIVER IN PERSON]]], function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$4, Brand#34], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFRegExp, Args:[%0->$$7, LG CASE||LG BOX||LG PACK||LG PKG]], function-call: algebricks:ge, Args:[%0->$$14, 20]], function-call: algebricks:le, Args:[%0->$$14, 30]], function-call: algebricks:ge, Args:[%0->$$6, 1]], function-call: algebricks:le, Args:[%0->$$6, 15]], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFRegExp, Args:[%0->$$24, AIR||AIR REG]], function-call: algebricks:eq, Args:[%0->$$23, DELIVER IN PERSON]]])
+              -- STREAM_SELECT  |UNPARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  join (function-call: algebricks:eq, Args:[%0->$$11, %0->$$1])
+                  -- HYBRID_HASH_JOIN [$$11][$$1]  |PARTITIONED|
+                    exchange 
+                    -- HASH_PARTITION_EXCHANGE [$$11]  |PARTITIONED|
+                      data-scan [$$11, $$14, $$15, $$16, $$23, $$24]<-[$$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25] <- default.lineitem
+                      -- DATASOURCE_SCAN  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          empty-tuple-source
+                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                    exchange 
+                    -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                      data-scan [$$1, $$4, $$6, $$7]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.part
+                      -- DATASOURCE_SCAN  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          empty-tuple-source
+                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
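Note: the plan above is TPC-H Q19 (discounted revenue). The single large STREAM_SELECT above the join holds the whole three-branch disjunction; because it mixes part and lineitem columns it cannot be pushed below the HYBRID_HASH_JOIN, and the container/shipmode IN lists surface as hive UDFRegExp calls ('SM CASE||SM BOX||...'). A sketch with inferred names:

    -- sketch of the assumed source query (TPC-H Q19)
    select sum(l_extendedprice * (1 - l_discount)) as revenue
    from lineitem l join part p on p.p_partkey = l.l_partkey
    where (p.p_brand = 'Brand#12'
           and p.p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
           and l.l_quantity >= 1 and l.l_quantity <= 11
           and p.p_size >= 1 and p.p_size <= 5
           and l.l_shipmode in ('AIR', 'AIR REG')
           and l.l_shipinstruct = 'DELIVER IN PERSON')
       or (p.p_brand = 'Brand#23'
           and p.p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
           and l.l_quantity >= 10 and l.l_quantity <= 20
           and p.p_size >= 1 and p.p_size <= 10
           and l.l_shipmode in ('AIR', 'AIR REG')
           and l.l_shipinstruct = 'DELIVER IN PERSON')
       or (p.p_brand = 'Brand#34'
           and p.p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
           and l.l_quantity >= 20 and l.l_quantity <= 30
           and p.p_size >= 1 and p.p_size <= 15
           and l.l_shipmode in ('AIR', 'AIR REG')
           and l.l_shipinstruct = 'DELIVER IN PERSON');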
diff --git a/hivesterix/src/test/resources/optimizerts/results/q1_pricing_summary_report.plan b/hivesterix/src/test/resources/optimizerts/results/q1_pricing_summary_report.plan
new file mode 100644
index 0000000..0e9c90f
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q1_pricing_summary_report.plan
@@ -0,0 +1,42 @@
+write [%0->$$17, %0->$$18, %0->$$19, %0->$$20, %0->$$21, %0->$$22, %0->$$23, %0->$$24, %0->$$25, %0->$$27]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25, $$27])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$27] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$26]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$17(ASC), $$18(ASC) ]  |PARTITIONED|
+        order (ASC, %0->$$17) (ASC, %0->$$18) 
+        -- STABLE_SORT [$$17(ASC), $$18(ASC)]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            group by ([$$17 := %0->$$37; $$18 := %0->$$38]) decor ([]) {
+                      aggregate [$$19, $$20, $$21, $$22, $$23, $$24, $$25, $$26] <- [function-call: hive:sum(FINAL), Args:[%0->$$29], function-call: hive:sum(FINAL), Args:[%0->$$30], function-call: hive:sum(FINAL), Args:[%0->$$31], function-call: hive:sum(FINAL), Args:[%0->$$32], function-call: hive:avg(FINAL), Args:[%0->$$33], function-call: hive:avg(FINAL), Args:[%0->$$34], function-call: hive:avg(FINAL), Args:[%0->$$35], function-call: hive:count(FINAL), Args:[%0->$$36]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$37, $$38]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$37, $$38]  |PARTITIONED|
+                group by ([$$37 := %0->$$9; $$38 := %0->$$10]) decor ([]) {
+                          aggregate [$$29, $$30, $$31, $$32, $$33, $$34, $$35, $$36] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$5], function-call: hive:sum(PARTIAL1), Args:[%0->$$6], function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$7]]], function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$7]], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPPlus, Args:[1, %0->$$8]]], function-call: hive:avg(PARTIAL1), Args:[%0->$$5], function-call: hive:avg(PARTIAL1), Args:[%0->$$6], function-call: hive:avg(PARTIAL1), Args:[%0->$$7], function-call: hive:count(PARTIAL1), Args:[1]]
+                          -- AGGREGATE  |LOCAL|
+                            nested tuple source
+                            -- NESTED_TUPLE_SOURCE  |LOCAL|
+                       }
+                -- EXTERNAL_GROUP_BY[$$9, $$10]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    project ([$$9, $$10, $$5, $$6, $$7, $$8])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      select (function-call: algebricks:le, Args:[%0->$$11, 1998-09-02])
+                      -- STREAM_SELECT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          data-scan [$$5, $$6, $$7, $$8, $$9, $$10, $$11]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
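Note: the plan above is TPC-H Q1 (pricing summary report). The eight aggregates line up one-to-one with the PARTIAL1/FINAL pairs ($$29..$$36 feeding $$19..$$26), and the final UDFToInteger cast turns the count into the projected $$27. A sketch with inferred names:

    -- sketch of the assumed source query (TPC-H Q1)
    select l_returnflag, l_linestatus,
           sum(l_quantity), sum(l_extendedprice),
           sum(l_extendedprice * (1 - l_discount)),
           sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)),
           avg(l_quantity), avg(l_extendedprice), avg(l_discount),
           count(1)
    from lineitem
    where l_shipdate <= '1998-09-02'
    group by l_returnflag, l_linestatus
    order by l_returnflag, l_linestatus;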
diff --git a/hivesterix/src/test/resources/optimizerts/results/q20_potential_part_promotion.plan b/hivesterix/src/test/resources/optimizerts/results/q20_potential_part_promotion.plan
new file mode 100644
index 0000000..eddfca5
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q20_potential_part_promotion.plan
@@ -0,0 +1,178 @@
+write [%0->$$1]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    distinct ([%0->$$1])
+    -- PRE_SORTED_DISTINCT_BY  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+        order (ASC, %0->$$1) 
+        -- STABLE_SORT [$$1(ASC)]  |LOCAL|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+            project ([$$1])
+            -- STREAM_PROJECT  |PARTITIONED|
+              select (function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$2, forest%])
+              -- STREAM_SELECT  |PARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  data-scan [$$1, $$2]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.part
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$17, %0->$$18, %0->$$20]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$17, $$18, $$20])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$20] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[0.5, %0->$$19]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        group by ([$$17 := %0->$$23; $$18 := %0->$$24]) decor ([]) {
+                  aggregate [$$19] <- [function-call: hive:sum(FINAL), Args:[%0->$$22]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[$$23, $$24]  |PARTITIONED|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE [$$23, $$24]  |PARTITIONED|
+            group by ([$$23 := %0->$$2; $$24 := %0->$$3]) decor ([]) {
+                      aggregate [$$22] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$5]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$2, $$3]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$2, $$3, $$5])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  select (function-call: algebricks:and, Args:[function-call: algebricks:ge, Args:[%0->$$11, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$11, 1995-01-01], function-call: algebricks:ge, Args:[%0->$$11, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$11, 1995-01-01]])
+                  -- STREAM_SELECT  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      data-scan [$$2, $$3, $$5, $$11]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                      -- DATASOURCE_SCAN  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          empty-tuple-source
+                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$6, %0->$$7, %0->$$3]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$6, $$7, $$3])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      join (function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$1, %0->$$5], function-call: algebricks:eq, Args:[%0->$$2, %0->$$6]])
+      -- HYBRID_HASH_JOIN [$$1, $$2][$$5, $$6]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$1, $$2]  |PARTITIONED|
+          data-scan []<-[$$1, $$2, $$3] <- default.q20_tmp2
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$5, $$6]  |PARTITIONED|
+          project ([$$5, $$6, $$7])
+          -- STREAM_PROJECT  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              join (function-call: algebricks:eq, Args:[%0->$$4, %0->$$5])
+              -- HYBRID_HASH_JOIN [$$4][$$5]  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$4]  |PARTITIONED|
+                  data-scan []<-[$$4] <- default.q20_tmp1
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$5]  |PARTITIONED|
+                  data-scan [$$5, $$6, $$7]<-[$$5, $$6, $$7, $$8, $$9] <- default.partsupp
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$1]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    distinct ([%0->$$1])
+    -- PRE_SORTED_DISTINCT_BY  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+        order (ASC, %0->$$1) 
+        -- STABLE_SORT [$$1(ASC)]  |LOCAL|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+            project ([$$1])
+            -- STREAM_PROJECT  |PARTITIONED|
+              select (function-call: algebricks:gt, Args:[%0->$$2, %0->$$3])
+              -- STREAM_SELECT  |PARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  data-scan []<-[$$1, $$2, $$3] <- default.q20_tmp3
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$3, %0->$$4]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- SORT_MERGE_EXCHANGE [$$3(ASC) ]  |PARTITIONED|
+    order (ASC, %0->$$3) 
+    -- STABLE_SORT [$$3(ASC)]  |LOCAL|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        project ([$$3, $$4])
+        -- STREAM_PROJECT  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$2])
+            -- HYBRID_HASH_JOIN [$$1][$$2]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                data-scan []<-[$$1] <- default.q20_tmp4
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                project ([$$2, $$3, $$4])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    join (function-call: algebricks:eq, Args:[%0->$$9, %0->$$5])
+                    -- HYBRID_HASH_JOIN [$$9][$$5]  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$9]  |PARTITIONED|
+                        project ([$$9])
+                        -- STREAM_PROJECT  |PARTITIONED|
+                          select (function-call: algebricks:eq, Args:[%0->$$10, CANADA])
+                          -- STREAM_SELECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              data-scan [$$9, $$10]<-[$$9, $$10, $$11, $$12] <- default.nation
+                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  empty-tuple-source
+                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$5]  |PARTITIONED|
+                        data-scan [$$5, $$2, $$3, $$4]<-[$$2, $$3, $$4, $$5, $$6, $$7, $$8] <- default.supplier
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
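Note: the five plans above implement TPC-H Q20 (potential part promotion). A sketch with inferred names; all temp-table column names are guesses. The two DISTINCTs are visible as STABLE_SORT plus PRE_SORTED_DISTINCT_BY pairs in the first and fourth plans:

    -- sketch of the assumed five-statement script (TPC-H Q20)
    insert overwrite table q20_tmp1
    select distinct p_partkey from part where p_name like 'forest%';

    insert overwrite table q20_tmp2
    select l_partkey, l_suppkey, 0.5 * sum(l_quantity) as sum_quantity
    from lineitem
    where l_shipdate >= '1994-01-01' and l_shipdate < '1995-01-01'
    group by l_partkey, l_suppkey;

    insert overwrite table q20_tmp3
    select ps_suppkey, ps_availqty, sum_quantity
    from partsupp ps
    join q20_tmp1 t1 on ps.ps_partkey = t1.p_partkey
    join q20_tmp2 t2 on ps.ps_partkey = t2.l_partkey
                    and ps.ps_suppkey = t2.l_suppkey;

    insert overwrite table q20_tmp4
    select distinct ps_suppkey from q20_tmp3
    where ps_availqty > sum_quantity;

    select s_name, s_address
    from supplier s
    join nation n on s.s_nationkey = n.n_nationkey
    join q20_tmp4 t on s.s_suppkey = t.ps_suppkey
    where n.n_name = 'CANADA'
    order by s_name;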
diff --git a/hivesterix/src/test/resources/optimizerts/results/q21_suppliers_who_kept_orders_waiting.plan b/hivesterix/src/test/resources/optimizerts/results/q21_suppliers_who_kept_orders_waiting.plan
new file mode 100644
index 0000000..cc47cf3
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q21_suppliers_who_kept_orders_waiting.plan
@@ -0,0 +1,224 @@
+write [%0->$$21, %0->$$24, %0->$$23]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$21, $$24, $$23])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$24] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$22]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        group by ([$$21 := %0->$$17]) decor ([]) {
+                  aggregate [$$22, $$23] <- [function-call: hive:count(COMPLETE), Args:[%0->$$18], function-call: hive:max(FINAL), Args:[%0->$$20]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- PRE_CLUSTERED_GROUP_BY[$$17]  |PARTITIONED|
+          exchange 
+          -- HASH_PARTITION_MERGE_EXCHANGE MERGE:[$$17(ASC)] HASH:[$$17]  |PARTITIONED|
+            group by ([$$17 := %0->$$1; $$18 := %0->$$3]) decor ([]) {
+                      aggregate [$$20] <- [function-call: hive:max(PARTIAL1), Args:[%0->$$3]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- PRE_CLUSTERED_GROUP_BY[$$1, $$3]  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+                order (ASC, %0->$$1) (ASC, %0->$$3) 
+                -- STABLE_SORT [$$1(ASC), $$3(ASC)]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    data-scan [$$1, $$3]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                    -- DATASOURCE_SCAN  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        empty-tuple-source
+                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$21, %0->$$24, %0->$$23]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$21, $$24, $$23])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$24] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$22]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        group by ([$$21 := %0->$$17]) decor ([]) {
+                  aggregate [$$22, $$23] <- [function-call: hive:count(COMPLETE), Args:[%0->$$18], function-call: hive:max(FINAL), Args:[%0->$$20]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- PRE_CLUSTERED_GROUP_BY[$$17]  |PARTITIONED|
+          exchange 
+          -- HASH_PARTITION_MERGE_EXCHANGE MERGE:[$$17(ASC)] HASH:[$$17]  |PARTITIONED|
+            group by ([$$17 := %0->$$1; $$18 := %0->$$3]) decor ([]) {
+                      aggregate [$$20] <- [function-call: hive:max(PARTIAL1), Args:[%0->$$3]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- PRE_CLUSTERED_GROUP_BY[$$1, $$3]  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+                order (ASC, %0->$$1) (ASC, %0->$$3) 
+                -- STABLE_SORT [$$1(ASC), $$3(ASC)]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    project ([$$1, $$3])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      select (function-call: algebricks:gt, Args:[%0->$$13, %0->$$12])
+                      -- STREAM_SELECT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          data-scan [$$1, $$3, $$12, $$13]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$43, %0->$$45]
+-- SINK_WRITE  |UNPARTITIONED|
+  project ([$$43, $$45])
+  -- STREAM_PROJECT  |UNPARTITIONED|
+    assign [$$45] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$44]]
+    -- ASSIGN  |UNPARTITIONED|
+      limit 100
+      -- STREAM_LIMIT  |UNPARTITIONED|
+        limit 100
+        -- STREAM_LIMIT  |UNPARTITIONED|
+          exchange 
+          -- SORT_MERGE_EXCHANGE [$$44(DESC), $$43(ASC) ]  |PARTITIONED|
+            limit 100
+            -- STREAM_LIMIT  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+                order (DESC, %0->$$44) (ASC, %0->$$43) 
+                -- STABLE_SORT [$$44(DESC), $$43(ASC)]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    group by ([$$43 := %0->$$48]) decor ([]) {
+                              aggregate [$$44] <- [function-call: hive:count(FINAL), Args:[%0->$$47]]
+                              -- AGGREGATE  |LOCAL|
+                                nested tuple source
+                                -- NESTED_TUPLE_SOURCE  |LOCAL|
+                           }
+                    -- EXTERNAL_GROUP_BY[$$48]  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$48]  |PARTITIONED|
+                        group by ([$$48 := %0->$$37]) decor ([]) {
+                                  aggregate [$$47] <- [function-call: hive:count(PARTIAL1), Args:[1]]
+                                  -- AGGREGATE  |LOCAL|
+                                    nested tuple source
+                                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+                               }
+                        -- EXTERNAL_GROUP_BY[$$37]  |LOCAL|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            project ([$$37])
+                            -- STREAM_PROJECT  |PARTITIONED|
+                              select (function-call: algebricks:or, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull, Args:[%0->$$2], function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$2, 1], function-call: algebricks:eq, Args:[%0->$$18, %0->$$3]]])
+                              -- STREAM_SELECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  left outer join (function-call: algebricks:eq, Args:[%0->$$4, %0->$$1])
+                                  -- HYBRID_HASH_JOIN [$$4][$$1]  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$4]  |PARTITIONED|
+                                      project ([$$37, $$4, $$18])
+                                      -- STREAM_PROJECT  |PARTITIONED|
+                                        select (function-call: algebricks:or, Args:[function-call: algebricks:gt, Args:[%0->$$5, 1], function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$5, 1], function-call: algebricks:neq, Args:[%0->$$18, %0->$$6]]])
+                                        -- STREAM_SELECT  |UNPARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            join (function-call: algebricks:eq, Args:[%0->$$16, %0->$$4])
+                                            -- HYBRID_HASH_JOIN [$$16][$$4]  |PARTITIONED|
+                                              exchange 
+                                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                project ([$$37, $$16, $$18])
+                                                -- STREAM_PROJECT  |PARTITIONED|
+                                                  exchange 
+                                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                    join (function-call: algebricks:eq, Args:[%0->$$16, %0->$$7])
+                                                    -- HYBRID_HASH_JOIN [$$16][$$7]  |PARTITIONED|
+                                                      exchange 
+                                                      -- HASH_PARTITION_EXCHANGE [$$16]  |PARTITIONED|
+                                                        project ([$$37, $$16, $$18])
+                                                        -- STREAM_PROJECT  |PARTITIONED|
+                                                          exchange 
+                                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                            join (function-call: algebricks:eq, Args:[%0->$$18, %0->$$36])
+                                                            -- HYBRID_HASH_JOIN [$$18][$$36]  |PARTITIONED|
+                                                              exchange 
+                                                              -- HASH_PARTITION_EXCHANGE [$$18]  |PARTITIONED|
+                                                                project ([$$16, $$18])
+                                                                -- STREAM_PROJECT  |PARTITIONED|
+                                                                  select (function-call: algebricks:and, Args:[function-call: algebricks:gt, Args:[%0->$$28, %0->$$27], function-call: algebricks:gt, Args:[%0->$$28, %0->$$27]])
+                                                                  -- STREAM_SELECT  |PARTITIONED|
+                                                                    exchange 
+                                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                      data-scan [$$16, $$18, $$27, $$28]<-[$$16, $$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25, $$26, $$27, $$28, $$29, $$30, $$31] <- default.lineitem
+                                                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                        exchange 
+                                                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                          empty-tuple-source
+                                                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                              exchange 
+                                                              -- HASH_PARTITION_EXCHANGE [$$36]  |PARTITIONED|
+                                                                project ([$$36, $$37])
+                                                                -- STREAM_PROJECT  |PARTITIONED|
+                                                                  exchange 
+                                                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                    join (function-call: algebricks:eq, Args:[%0->$$39, %0->$$32])
+                                                                    -- HYBRID_HASH_JOIN [$$39][$$32]  |PARTITIONED|
+                                                                      exchange 
+                                                                      -- HASH_PARTITION_EXCHANGE [$$39]  |PARTITIONED|
+                                                                        data-scan [$$39, $$36, $$37]<-[$$36, $$37, $$38, $$39, $$40, $$41, $$42] <- default.supplier
+                                                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                          exchange 
+                                                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                            empty-tuple-source
+                                                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                                      exchange 
+                                                                      -- HASH_PARTITION_EXCHANGE [$$32]  |PARTITIONED|
+                                                                        project ([$$32])
+                                                                        -- STREAM_PROJECT  |PARTITIONED|
+                                                                          select (function-call: algebricks:eq, Args:[%0->$$33, SAUDI ARABIA])
+                                                                          -- STREAM_SELECT  |PARTITIONED|
+                                                                            exchange 
+                                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                              data-scan [$$32, $$33]<-[$$32, $$33, $$34, $$35] <- default.nation
+                                                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                                exchange 
+                                                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                                  empty-tuple-source
+                                                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                      exchange 
+                                                      -- HASH_PARTITION_EXCHANGE [$$7]  |PARTITIONED|
+                                                        project ([$$7])
+                                                        -- STREAM_PROJECT  |PARTITIONED|
+                                                          select (function-call: algebricks:eq, Args:[%0->$$9, F])
+                                                          -- STREAM_SELECT  |PARTITIONED|
+                                                            exchange 
+                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                              data-scan [$$7, $$9]<-[$$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15] <- default.orders
+                                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                exchange 
+                                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                  empty-tuple-source
+                                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                              exchange 
+                                              -- HASH_PARTITION_EXCHANGE [$$4]  |PARTITIONED|
+                                                data-scan [$$4, $$5, $$6]<-[$$4, $$5, $$6] <- default.q21_tmp1
+                                                -- DATASOURCE_SCAN  |PARTITIONED|
+                                                  exchange 
+                                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                    empty-tuple-source
+                                                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                                      data-scan [$$1, $$2, $$3]<-[$$1, $$2, $$3] <- default.q21_tmp2
+                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          empty-tuple-source
+                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
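
The count(PARTIAL1)/count(FINAL) pairing around HASH_PARTITION_MERGE_EXCHANGE in this plan is two-phase aggregation: each node pre-aggregates its local rows, the partial states are hash-partitioned on the group key, and the final phase combines partials rather than recounting rows. A minimal sketch with invented sample data (real Hive aggregators carry richer state than a bare count):

from collections import Counter

def partial_count(local_rows, key):
    # Local group-by phase: count(PARTIAL1) per node.
    return Counter(row[key] for row in local_rows)

def final_count(partials):
    # After repartitioning on the group key, count(FINAL)
    # sums the partial counts instead of recounting rows.
    total = Counter()
    for p in partials:
        total.update(p)
    return total

node1 = [("s1",), ("s1",), ("s2",)]
node2 = [("s1",), ("s2",)]
print(final_count([partial_count(node1, 0), partial_count(node2, 0)]))
# Counter({'s1': 3, 's2': 2}) -- same answer, far fewer rows shipped
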
diff --git a/hivesterix/src/test/resources/optimizerts/results/q22_global_sales_opportunity.plan b/hivesterix/src/test/resources/optimizerts/results/q22_global_sales_opportunity.plan
new file mode 100644
index 0000000..591576b
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q22_global_sales_opportunity.plan
@@ -0,0 +1,136 @@
+write [%0->$$6, %0->$$1, %0->$$9]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$6, $$1, $$9])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$9] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2]]
+    -- ASSIGN  |PARTITIONED|
+      select (function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:or, Args:[function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 13], function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 31]], function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 23]], function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 29]], function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 30]], function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 18]], function-call: algebricks:eq, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFSubstr, Args:[%0->$$5, 1, 2], 17]])
+      -- STREAM_SELECT  |PARTITIONED|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          data-scan []<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8] <- default.customer
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$4]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    group by ([]) decor ([]) {
+              aggregate [$$4] <- [function-call: hive:avg(FINAL), Args:[%0->$$6]]
+              -- AGGREGATE  |LOCAL|
+                nested tuple source
+                -- NESTED_TUPLE_SOURCE  |LOCAL|
+           }
+    -- EXTERNAL_GROUP_BY[]  |PARTITIONED|
+      exchange 
+      -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$6] <- [function-call: hive:avg(PARTIAL1), Args:[%0->$$1]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            select (function-call: algebricks:gt, Args:[%0->$$1, 0.0])
+            -- STREAM_SELECT  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                data-scan [$$1]<-[$$1, $$2, $$3] <- default.q22_customer_tmp
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$2]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    distinct ([%0->$$2])
+    -- PRE_SORTED_DISTINCT_BY  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+        order (ASC, %0->$$2) 
+        -- STABLE_SORT [$$2(ASC)]  |LOCAL|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+            data-scan [$$2]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.orders
+            -- DATASOURCE_SCAN  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                empty-tuple-source
+                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$6, %0->$$9, %0->$$8]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$6, $$9, $$8])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$9] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$7]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$6(ASC) ]  |PARTITIONED|
+        order (ASC, %0->$$6) 
+        -- STABLE_SORT [$$6(ASC)]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            group by ([$$6 := %0->$$13]) decor ([]) {
+                      aggregate [$$7, $$8] <- [function-call: hive:count(FINAL), Args:[%0->$$11], function-call: hive:sum(FINAL), Args:[%0->$$12]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$13]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$13]  |PARTITIONED|
+                group by ([$$13 := %0->$$5]) decor ([]) {
+                          aggregate [$$11, $$12] <- [function-call: hive:count(PARTIAL1), Args:[1], function-call: hive:sum(PARTIAL1), Args:[%0->$$3]]
+                          -- AGGREGATE  |LOCAL|
+                            nested tuple source
+                            -- NESTED_TUPLE_SOURCE  |LOCAL|
+                       }
+                -- EXTERNAL_GROUP_BY[$$5]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    project ([$$5, $$3])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        join (function-call: algebricks:and, Args:[function-call: algebricks:gt, Args:[%0->$$3, %0->$$1], true])
+                        -- NESTED_LOOP  |PARTITIONED|
+                          exchange 
+                          -- BROADCAST_EXCHANGE  |PARTITIONED|
+                            project ([$$5, $$3])
+                            -- STREAM_PROJECT  |PARTITIONED|
+                              select (function-call: hive:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull, Args:[%0->$$2])
+                              -- STREAM_SELECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  left outer join (function-call: algebricks:eq, Args:[%0->$$4, %0->$$2])
+                                  -- HYBRID_HASH_JOIN [$$4][$$2]  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$4]  |PARTITIONED|
+                                      data-scan []<-[$$3, $$4, $$5] <- default.q22_customer_tmp
+                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          empty-tuple-source
+                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                                      data-scan [$$2]<-[$$2] <- default.q22_orders_tmp
+                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          empty-tuple-source
+                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            data-scan [$$1]<-[$$1] <- default.q22_customer_tmp1
+                            -- DATASOURCE_SCAN  |PARTITIONED|
+                              exchange 
+                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                empty-tuple-source
+                                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
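
The NESTED_LOOP join fed by BROADCAST_EXCHANGE near the end of this plan handles the non-equi predicate (a gt comparison) that hash partitioning cannot route: one input is replicated to every partition, and each local row is tested against every broadcast row. A sketch of the mechanic (sample values invented; in the plan above it is the customer subtree that gets broadcast):

def broadcast_nested_loop_join(local_partitions, broadcast_rows, pred):
    # broadcast_rows is replicated to every partition; each partition
    # pairs its local rows against all broadcast rows and applies the
    # join predicate directly -- no partitioning key required.
    out = []
    for local in local_partitions:
        for a in local:
            for c in broadcast_rows:
                if pred(c, a):
                    out.append(c + a)
    return out

customers = [("c1", 5000.0), ("c2", 4000.0)]   # broadcast side (invented)
avg_parts = [[(4500.0,)], []]                  # partitioned one-row avg scan
print(broadcast_nested_loop_join(avg_parts, customers,
                                 lambda c, a: c[1] > a[0]))
# [('c1', 5000.0, 4500.0)]
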
diff --git a/hivesterix/src/test/resources/optimizerts/results/q2_minimum_cost_supplier.plan b/hivesterix/src/test/resources/optimizerts/results/q2_minimum_cost_supplier.plan
new file mode 100644
index 0000000..151f34d
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q2_minimum_cost_supplier.plan
@@ -0,0 +1,156 @@
+write [%0->$$20, %0->$$16, %0->$$26, %0->$$1, %0->$$13, %0->$$3, %0->$$17, %0->$$19, %0->$$21]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$20, $$16, $$26, $$1, $$13, $$3, $$17, $$19, $$21])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$10])
+      -- HYBRID_HASH_JOIN [$$1][$$10]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+          project ([$$1, $$3])
+          -- STREAM_PROJECT  |PARTITIONED|
+            select (function-call: algebricks:and, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$5, %BRASS], function-call: algebricks:eq, Args:[%0->$$6, 15], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$5, %BRASS]])
+            -- STREAM_SELECT  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                data-scan [$$1, $$3, $$5, $$6]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.part
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$10]  |PARTITIONED|
+          project ([$$10, $$16, $$17, $$19, $$20, $$21, $$26, $$13])
+          -- STREAM_PROJECT  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              join (function-call: algebricks:eq, Args:[%0->$$11, %0->$$15])
+              -- HYBRID_HASH_JOIN [$$11][$$15]  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$11]  |PARTITIONED|
+                  data-scan [$$11, $$10, $$13]<-[$$10, $$11, $$12, $$13, $$14] <- default.partsupp
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$15]  |PARTITIONED|
+                  project ([$$15, $$16, $$17, $$19, $$20, $$21, $$26])
+                  -- STREAM_PROJECT  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      join (function-call: algebricks:eq, Args:[%0->$$18, %0->$$25])
+                      -- HYBRID_HASH_JOIN [$$18][$$25]  |PARTITIONED|
+                        exchange 
+                        -- HASH_PARTITION_EXCHANGE [$$18]  |PARTITIONED|
+                          data-scan []<-[$$15, $$16, $$17, $$18, $$19, $$20, $$21] <- default.supplier
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                        exchange 
+                        -- HASH_PARTITION_EXCHANGE [$$25]  |PARTITIONED|
+                          project ([$$25, $$26])
+                          -- STREAM_PROJECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              join (function-call: algebricks:eq, Args:[%0->$$22, %0->$$27])
+                              -- HYBRID_HASH_JOIN [$$22][$$27]  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$22]  |PARTITIONED|
+                                  project ([$$22])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    select (function-call: algebricks:eq, Args:[%0->$$23, EUROPE])
+                                    -- STREAM_SELECT  |PARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        data-scan [$$23, $$22]<-[$$22, $$23, $$24] <- default.region
+                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            empty-tuple-source
+                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$27]  |PARTITIONED|
+                                  data-scan [$$27, $$25, $$26]<-[$$25, $$26, $$27, $$28] <- default.nation
+                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      empty-tuple-source
+                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$10, %0->$$11]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$10, $$11])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      group by ([$$10 := %0->$$14]) decor ([]) {
+                aggregate [$$11] <- [function-call: hive:min(FINAL), Args:[%0->$$13]]
+                -- AGGREGATE  |LOCAL|
+                  nested tuple source
+                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+             }
+      -- PRE_CLUSTERED_GROUP_BY[$$14]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_MERGE_EXCHANGE MERGE:[$$14(ASC)] HASH:[$$14]  |PARTITIONED|
+          group by ([$$14 := %0->$$4]) decor ([]) {
+                    aggregate [$$13] <- [function-call: hive:min(PARTIAL1), Args:[%0->$$5]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- PRE_CLUSTERED_GROUP_BY[$$4]  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+              order (ASC, %0->$$4) 
+              -- STABLE_SORT [$$4(ASC)]  |LOCAL|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  data-scan [$$4, $$5]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.q2_minimum_cost_supplier_tmp1
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$3, %0->$$4, %0->$$5, %0->$$6, %0->$$8, %0->$$9, %0->$$10, %0->$$11]
+-- SINK_WRITE  |UNPARTITIONED|
+  limit 100
+  -- STREAM_LIMIT  |UNPARTITIONED|
+    limit 100
+    -- STREAM_LIMIT  |UNPARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$3(DESC), $$5(ASC), $$4(ASC), $$6(ASC) ]  |PARTITIONED|
+        limit 100
+        -- STREAM_LIMIT  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+            order (DESC, %0->$$3) (ASC, %0->$$5) (ASC, %0->$$4) (ASC, %0->$$6) 
+            -- STABLE_SORT [$$3(DESC), $$5(ASC), $$4(ASC), $$6(ASC)]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$3, $$4, $$5, $$6, $$8, $$9, $$10, $$11])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    join (function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$1, %0->$$6], function-call: algebricks:eq, Args:[%0->$$2, %0->$$7]])
+                    -- HYBRID_HASH_JOIN [$$1, $$2][$$6, $$7]  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$1, $$2]  |PARTITIONED|
+                        data-scan []<-[$$1, $$2] <- default.q2_minimum_cost_supplier_tmp2
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                      exchange 
+                      -- HASH_PARTITION_EXCHANGE [$$6, $$7]  |PARTITIONED|
+                        data-scan [$$6, $$7, $$3, $$4, $$5, $$8, $$9, $$10, $$11]<-[$$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11] <- default.q2_minimum_cost_supplier_tmp1
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
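
Worth noting in this plan: limit 100 appears three times because it is pushed below the SORT_MERGE_EXCHANGE. Each partition sorts locally and keeps only its first 100 rows, the exchange merges the already-sorted streams, and a final limit trims the merged prefix. A sketch of that top-k-then-merge shape, with heapq.merge standing in for the merging exchange (data invented):

import heapq

def topk_then_merge(partitions, k, keyfunc):
    # STREAM_LIMIT |LOCAL| under a local sort on each partition ...
    local = [sorted(p, key=keyfunc)[:k] for p in partitions]
    # ... SORT_MERGE_EXCHANGE interleaves the sorted streams ...
    merged = heapq.merge(*local, key=keyfunc)
    # ... and the final STREAM_LIMIT trims the merged prefix.
    return [row for _, row in zip(range(k), merged)]

parts = [[(3, "a"), (1, "b")], [(2, "c"), (4, "d")]]
print(topk_then_merge(parts, 2, lambda r: r[0]))
# [(1, 'b'), (2, 'c')]
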
diff --git a/hivesterix/src/test/resources/optimizerts/results/q3_shipping_priority.plan b/hivesterix/src/test/resources/optimizerts/results/q3_shipping_priority.plan
new file mode 100644
index 0000000..a1b8e42
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q3_shipping_priority.plan
@@ -0,0 +1,70 @@
+write [%0->$$1, %0->$$34, %0->$$29, %0->$$32]
+-- SINK_WRITE  |UNPARTITIONED|
+  limit 10
+  -- STREAM_LIMIT  |UNPARTITIONED|
+    limit 10
+    -- STREAM_LIMIT  |UNPARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$34(DESC) ]  |PARTITIONED|
+        limit 10
+        -- STREAM_LIMIT  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+            order (DESC, %0->$$34) 
+            -- STABLE_SORT [$$34(DESC)]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$1, $$34, $$29, $$32])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  assign [$$34] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$7]]]
+                  -- ASSIGN  |PARTITIONED|
+                    project ([$$29, $$32, $$1, $$6, $$7])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$25])
+                        -- HYBRID_HASH_JOIN [$$1][$$25]  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                            project ([$$1, $$6, $$7])
+                            -- STREAM_PROJECT  |PARTITIONED|
+                              select (function-call: algebricks:lt, Args:[%0->$$4, 3])
+                              -- STREAM_SELECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  data-scan [$$1, $$4, $$6, $$7]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      empty-tuple-source
+                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$25]  |PARTITIONED|
+                            project ([$$25, $$29, $$32])
+                            -- STREAM_PROJECT  |PARTITIONED|
+                              exchange 
+                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                join (function-call: algebricks:eq, Args:[%0->$$26, %0->$$17])
+                                -- HYBRID_HASH_JOIN [$$26][$$17]  |PARTITIONED|
+                                  exchange 
+                                  -- HASH_PARTITION_EXCHANGE [$$26]  |PARTITIONED|
+                                    data-scan [$$26, $$25, $$29, $$32]<-[$$25, $$26, $$27, $$28, $$29, $$30, $$31, $$32, $$33] <- default.orders
+                                    -- DATASOURCE_SCAN  |PARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        empty-tuple-source
+                                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                  exchange 
+                                  -- HASH_PARTITION_EXCHANGE [$$17]  |PARTITIONED|
+                                    project ([$$17])
+                                    -- STREAM_PROJECT  |PARTITIONED|
+                                      select (function-call: algebricks:eq, Args:[%0->$$23, BUILDING])
+                                      -- STREAM_SELECT  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          data-scan [$$17, $$23]<-[$$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24] <- default.customer
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
diff --git a/hivesterix/src/test/resources/optimizerts/results/q4_order_priority.plan b/hivesterix/src/test/resources/optimizerts/results/q4_order_priority.plan
new file mode 100644
index 0000000..435fd7c
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q4_order_priority.plan
@@ -0,0 +1,82 @@
+write [%0->$$1]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    distinct ([%0->$$1])
+    -- PRE_SORTED_DISTINCT_BY  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+        order (ASC, %0->$$1) 
+        -- STABLE_SORT [$$1(ASC)]  |LOCAL|
+          exchange 
+          -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+            project ([$$1])
+            -- STREAM_PROJECT  |PARTITIONED|
+              select (function-call: algebricks:lt, Args:[%0->$$12, %0->$$13])
+              -- STREAM_SELECT  |PARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  data-scan [$$1, $$12, $$13]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$11, %0->$$13]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$11, $$13])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$13] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFToInteger, Args:[%0->$$12]]
+    -- ASSIGN  |PARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$11(ASC) ]  |PARTITIONED|
+        order (ASC, %0->$$11) 
+        -- STABLE_SORT [$$11(ASC)]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            group by ([$$11 := %0->$$16]) decor ([]) {
+                      aggregate [$$12] <- [function-call: hive:count(FINAL), Args:[%0->$$15]]
+                      -- AGGREGATE  |LOCAL|
+                        nested tuple source
+                        -- NESTED_TUPLE_SOURCE  |LOCAL|
+                   }
+            -- EXTERNAL_GROUP_BY[$$16]  |PARTITIONED|
+              exchange 
+              -- HASH_PARTITION_EXCHANGE [$$16]  |PARTITIONED|
+                group by ([$$16 := %0->$$7]) decor ([]) {
+                          aggregate [$$15] <- [function-call: hive:count(PARTIAL1), Args:[1]]
+                          -- AGGREGATE  |LOCAL|
+                            nested tuple source
+                            -- NESTED_TUPLE_SOURCE  |LOCAL|
+                       }
+                -- EXTERNAL_GROUP_BY[$$7]  |LOCAL|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    project ([$$7])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$2])
+                        -- HYBRID_HASH_JOIN [$$1][$$2]  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                            data-scan []<-[$$1] <- default.q4_order_priority_tmp
+                            -- DATASOURCE_SCAN  |PARTITIONED|
+                              exchange 
+                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                empty-tuple-source
+                                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                          exchange 
+                          -- HASH_PARTITION_EXCHANGE [$$2]  |PARTITIONED|
+                            project ([$$2, $$7])
+                            -- STREAM_PROJECT  |PARTITIONED|
+                              select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$6, 1993-10-01], function-call: algebricks:ge, Args:[%0->$$6, 1993-07-01], function-call: algebricks:lt, Args:[%0->$$6, 1993-10-01]])
+                              -- STREAM_SELECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  data-scan [$$2, $$6, $$7]<-[$$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10] <- default.orders
+                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      empty-tuple-source
+                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
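
PRE_SORTED_DISTINCT_BY in the first hunk of this plan depends on the STABLE_SORT beneath it: once the input is ordered on the distinct key, duplicates are adjacent and a single streaming pass removes them. A sketch (sample rows invented):

from itertools import groupby

def pre_sorted_distinct(sorted_rows, key):
    # Input must already be sorted on the key; keep one row per run.
    return [next(group) for _, group in groupby(sorted_rows, key=key)]

rows = sorted([("o3",), ("o1",), ("o3",), ("o2",)])
print(pre_sorted_distinct(rows, key=lambda r: r[0]))
# [('o1',), ('o2',), ('o3',)]
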
diff --git a/hivesterix/src/test/resources/optimizerts/results/q5_local_supplier_volume.plan b/hivesterix/src/test/resources/optimizerts/results/q5_local_supplier_volume.plan
new file mode 100644
index 0000000..177d24c
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q5_local_supplier_volume.plan
@@ -0,0 +1,126 @@
+write [%0->$$48, %0->$$49]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$48, $$49])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- SORT_MERGE_EXCHANGE [$$49(DESC) ]  |PARTITIONED|
+      order (DESC, %0->$$49) 
+      -- STABLE_SORT [$$49(DESC)]  |LOCAL|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          group by ([$$48 := %0->$$52]) decor ([]) {
+                    aggregate [$$49] <- [function-call: hive:sum(FINAL), Args:[%0->$$51]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$52]  |PARTITIONED|
+            exchange 
+            -- HASH_PARTITION_EXCHANGE [$$52]  |PARTITIONED|
+              group by ([$$52 := %0->$$42]) decor ([]) {
+                        aggregate [$$51] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$23, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$24]]]]
+                        -- AGGREGATE  |LOCAL|
+                          nested tuple source
+                          -- NESTED_TUPLE_SOURCE  |LOCAL|
+                     }
+              -- EXTERNAL_GROUP_BY[$$42]  |LOCAL|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  project ([$$42, $$23, $$24])
+                  -- STREAM_PROJECT  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      join (function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$37, %0->$$4], function-call: algebricks:eq, Args:[%0->$$10, %0->$$1]])
+                      -- HYBRID_HASH_JOIN [$$37, $$10][$$4, $$1]  |PARTITIONED|
+                        exchange 
+                        -- HASH_PARTITION_EXCHANGE [$$37, $$10]  |PARTITIONED|
+                          project ([$$10, $$42, $$23, $$24, $$37])
+                          -- STREAM_PROJECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              join (function-call: algebricks:eq, Args:[%0->$$18, %0->$$9])
+                              -- HYBRID_HASH_JOIN [$$18][$$9]  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$18]  |PARTITIONED|
+                                  project ([$$18, $$23, $$24, $$42, $$37])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      join (function-call: algebricks:eq, Args:[%0->$$34, %0->$$20])
+                                      -- HYBRID_HASH_JOIN [$$34][$$20]  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$34]  |PARTITIONED|
+                                          project ([$$34, $$37, $$42])
+                                          -- STREAM_PROJECT  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              join (function-call: algebricks:eq, Args:[%0->$$41, %0->$$37])
+                                              -- HYBRID_HASH_JOIN [$$41][$$37]  |PARTITIONED|
+                                                exchange 
+                                                -- HASH_PARTITION_EXCHANGE [$$41]  |PARTITIONED|
+                                                  project ([$$41, $$42])
+                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                    exchange 
+                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                      join (function-call: algebricks:eq, Args:[%0->$$45, %0->$$43])
+                                                      -- HYBRID_HASH_JOIN [$$45][$$43]  |PARTITIONED|
+                                                        exchange 
+                                                        -- HASH_PARTITION_EXCHANGE [$$45]  |PARTITIONED|
+                                                          project ([$$45])
+                                                          -- STREAM_PROJECT  |PARTITIONED|
+                                                            select (function-call: algebricks:eq, Args:[%0->$$46, ASIA])
+                                                            -- STREAM_SELECT  |PARTITIONED|
+                                                              exchange 
+                                                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                data-scan [$$46, $$45]<-[$$45, $$46, $$47] <- default.region
+                                                                -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                  exchange 
+                                                                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                    empty-tuple-source
+                                                                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                        exchange 
+                                                        -- HASH_PARTITION_EXCHANGE [$$43]  |PARTITIONED|
+                                                          data-scan [$$43, $$41, $$42]<-[$$41, $$42, $$43, $$44] <- default.nation
+                                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                                            exchange 
+                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                              empty-tuple-source
+                                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                exchange 
+                                                -- HASH_PARTITION_EXCHANGE [$$37]  |PARTITIONED|
+                                                  data-scan [$$37, $$34]<-[$$34, $$35, $$36, $$37, $$38, $$39, $$40] <- default.supplier
+                                                  -- DATASOURCE_SCAN  |PARTITIONED|
+                                                    exchange 
+                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                      empty-tuple-source
+                                                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$20]  |PARTITIONED|
+                                          data-scan [$$20, $$18, $$23, $$24]<-[$$18, $$19, $$20, $$21, $$22, $$23, $$24, $$25, $$26, $$27, $$28, $$29, $$30, $$31, $$32, $$33] <- default.lineitem
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$9]  |PARTITIONED|
+                                  project ([$$9, $$10])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$13, 1995-01-01], function-call: algebricks:ge, Args:[%0->$$13, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$13, 1995-01-01]])
+                                    -- STREAM_SELECT  |PARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        data-scan [$$9, $$10, $$13]<-[$$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17] <- default.orders
+                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            empty-tuple-source
+                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                        exchange 
+                        -- HASH_PARTITION_EXCHANGE [$$4, $$1]  |PARTITIONED|
+                          data-scan [$$4, $$1]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8] <- default.customer
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
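
EXTERNAL_GROUP_BY, used here for the revenue sum, keeps a bounded in-memory table of group states and emits partial results when the budget is exhausted; a downstream phase then combines partials per key. A deliberately simplified sketch that flushes partials instead of spilling partitions to disk, which works here only because sum is decomposable (memory bound and data invented):

def external_sum(rows, max_groups):
    # Bounded hash aggregation: flush partial sums when the table is
    # full rather than spilling to disk as the real operator does.
    table, flushed = {}, []
    for key, val in rows:
        if key not in table and len(table) >= max_groups:
            flushed.extend(table.items())
            table = {}
        table[key] = table.get(key, 0.0) + val
    flushed.extend(table.items())
    final = {}
    for key, val in flushed:   # combine flushed partials per key
        final[key] = final.get(key, 0.0) + val
    return final

rows = [("n1", 10.0), ("n2", 5.0), ("n1", 7.0), ("n3", 2.0), ("n2", 1.0)]
print(external_sum(rows, max_groups=2))
# {'n1': 17.0, 'n2': 6.0, 'n3': 2.0}
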
diff --git a/hivesterix/src/test/resources/optimizerts/results/q6_forecast_revenue_change.plan b/hivesterix/src/test/resources/optimizerts/results/q6_forecast_revenue_change.plan
new file mode 100644
index 0000000..cd9ffcd
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q6_forecast_revenue_change.plan
@@ -0,0 +1,34 @@
+write [%0->$$17]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    group by ([]) decor ([]) {
+              aggregate [$$17] <- [function-call: hive:sum(FINAL), Args:[%0->$$19]]
+              -- AGGREGATE  |LOCAL|
+                nested tuple source
+                -- NESTED_TUPLE_SOURCE  |LOCAL|
+           }
+    -- EXTERNAL_GROUP_BY[]  |PARTITIONED|
+      exchange 
+      -- HASH_PARTITION_EXCHANGE []  |PARTITIONED|
+        group by ([]) decor ([]) {
+                  aggregate [$$19] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, %0->$$7]]]
+                  -- AGGREGATE  |LOCAL|
+                    nested tuple source
+                    -- NESTED_TUPLE_SOURCE  |LOCAL|
+               }
+        -- EXTERNAL_GROUP_BY[]  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            project ([$$6, $$7])
+            -- STREAM_PROJECT  |PARTITIONED|
+              select (function-call: algebricks:and, Args:[function-call: algebricks:ge, Args:[%0->$$11, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$11, 1995-01-01], function-call: algebricks:ge, Args:[%0->$$7, 0.05], function-call: algebricks:le, Args:[%0->$$7, 0.07], function-call: algebricks:lt, Args:[%0->$$5, 24], function-call: algebricks:ge, Args:[%0->$$11, 1994-01-01], function-call: algebricks:lt, Args:[%0->$$11, 1995-01-01], function-call: algebricks:ge, Args:[%0->$$7, 0.05], function-call: algebricks:le, Args:[%0->$$7, 0.07], function-call: algebricks:lt, Args:[%0->$$5, 24]])
+              -- STREAM_SELECT  |PARTITIONED|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  data-scan [$$5, $$6, $$7, $$11]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                  -- DATASOURCE_SCAN  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      empty-tuple-source
+                      -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
diff --git a/hivesterix/src/test/resources/optimizerts/results/q7_volume_shipping.plan b/hivesterix/src/test/resources/optimizerts/results/q7_volume_shipping.plan
new file mode 100644
index 0000000..39f8301
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q7_volume_shipping.plan
@@ -0,0 +1,192 @@
+write [%0->$$17, %0->$$18, %0->$$19, %0->$$20]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    union ($$6, $$10, $$17) ($$2, $$14, $$18) ($$5, $$9, $$19) ($$1, $$13, $$20)
+    -- UNION_ALL  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |UNPARTITIONED|
+        project ([$$6, $$2, $$5, $$1])
+        -- STREAM_PROJECT  |UNPARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            join (true)
+            -- NESTED_LOOP  |PARTITIONED|
+              exchange 
+              -- BROADCAST_EXCHANGE  |PARTITIONED|
+                select (function-call: algebricks:eq, Args:[%0->$$2, GERMANY])
+                -- STREAM_SELECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    data-scan [$$1, $$2]<-[$$1, $$2, $$3, $$4] <- default.nation
+                    -- DATASOURCE_SCAN  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        empty-tuple-source
+                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$5, $$6])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  select (function-call: algebricks:eq, Args:[%0->$$6, FRANCE])
+                  -- STREAM_SELECT  |PARTITIONED|
+                    project ([$$5, $$6])
+                    -- STREAM_PROJECT  |UNPARTITIONED|
+                      assign [$$5, $$6] <- [%0->$$9, %0->$$10]
+                      -- ASSIGN  |UNPARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          replicate 
+                          -- SPLIT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              data-scan [$$9, $$10]<-[$$9, $$10, $$11, $$12] <- default.nation
+                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  empty-tuple-source
+                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |UNPARTITIONED|
+        project ([$$10, $$14, $$9, $$13])
+        -- STREAM_PROJECT  |UNPARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            join (true)
+            -- NESTED_LOOP  |PARTITIONED|
+              exchange 
+              -- BROADCAST_EXCHANGE  |PARTITIONED|
+                select (function-call: algebricks:eq, Args:[%0->$$14, FRANCE])
+                -- STREAM_SELECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    data-scan [$$13, $$14]<-[$$13, $$14, $$15, $$16] <- default.nation
+                    -- DATASOURCE_SCAN  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        empty-tuple-source
+                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                project ([$$9, $$10])
+                -- STREAM_PROJECT  |PARTITIONED|
+                  select (function-call: algebricks:eq, Args:[%0->$$10, GERMANY])
+                  -- STREAM_SELECT  |PARTITIONED|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      replicate 
+                      -- SPLIT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          data-scan [$$9, $$10]<-[$$9, $$10, $$11, $$12] <- default.nation
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$47, %0->$$48, %0->$$49, %0->$$50]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$47, $$48, $$49, $$50])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- SORT_MERGE_EXCHANGE [$$47(ASC), $$48(ASC), $$49(ASC) ]  |PARTITIONED|
+      order (ASC, %0->$$47) (ASC, %0->$$48) (ASC, %0->$$49) 
+      -- STABLE_SORT [$$47(ASC), $$48(ASC), $$49(ASC)]  |LOCAL|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          group by ([$$47 := %0->$$53; $$48 := %0->$$54; $$49 := %0->$$55]) decor ([]) {
+                    aggregate [$$50] <- [function-call: hive:sum(FINAL), Args:[%0->$$52]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$53, $$54, $$55]  |PARTITIONED|
+            exchange 
+            -- HASH_PARTITION_EXCHANGE [$$53, $$54, $$55]  |PARTITIONED|
+              group by ([$$53 := %0->$$1; $$54 := %0->$$2; $$55 := %0->$$45]) decor ([]) {
+                        aggregate [$$52] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$46]]
+                        -- AGGREGATE  |LOCAL|
+                          nested tuple source
+                          -- NESTED_TUPLE_SOURCE  |LOCAL|
+                     }
+              -- EXTERNAL_GROUP_BY[$$1, $$2, $$45]  |LOCAL|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  project ([$$1, $$2, $$45, $$46])
+                  -- STREAM_PROJECT  |PARTITIONED|
+                    assign [$$45, $$46] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFYear, Args:[%0->$$30], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$25, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$26]]]
+                    -- ASSIGN  |PARTITIONED|
+                      project ([$$1, $$2, $$30, $$25, $$26])
+                      -- STREAM_PROJECT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          join (function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$15, %0->$$4], function-call: algebricks:eq, Args:[%0->$$8, %0->$$3]])
+                          -- HYBRID_HASH_JOIN [$$15, $$8][$$4, $$3]  |PARTITIONED|
+                            exchange 
+                            -- HASH_PARTITION_EXCHANGE [$$15, $$8]  |PARTITIONED|
+                              project ([$$8, $$30, $$25, $$26, $$15])
+                              -- STREAM_PROJECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  join (function-call: algebricks:eq, Args:[%0->$$22, %0->$$5])
+                                  -- HYBRID_HASH_JOIN [$$22][$$5]  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$22]  |PARTITIONED|
+                                      project ([$$15, $$30, $$25, $$26, $$22])
+                                      -- STREAM_PROJECT  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          join (function-call: algebricks:eq, Args:[%0->$$37, %0->$$12])
+                                          -- HYBRID_HASH_JOIN [$$37][$$12]  |PARTITIONED|
+                                            exchange 
+                                            -- HASH_PARTITION_EXCHANGE [$$37]  |PARTITIONED|
+                                              project ([$$37, $$22, $$25, $$26, $$30])
+                                              -- STREAM_PROJECT  |PARTITIONED|
+                                                exchange 
+                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                  join (function-call: algebricks:eq, Args:[%0->$$20, %0->$$36])
+                                                  -- HYBRID_HASH_JOIN [$$20][$$36]  |PARTITIONED|
+                                                    exchange 
+                                                    -- HASH_PARTITION_EXCHANGE [$$20]  |PARTITIONED|
+                                                      select (function-call: algebricks:and, Args:[function-call: algebricks:le, Args:[%0->$$30, 1996-12-31], function-call: algebricks:ge, Args:[%0->$$30, 1995-01-01], function-call: algebricks:le, Args:[%0->$$30, 1996-12-31]])
+                                                      -- STREAM_SELECT  |PARTITIONED|
+                                                        exchange 
+                                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                          data-scan [$$20, $$22, $$25, $$26, $$30]<-[$$20, $$21, $$22, $$23, $$24, $$25, $$26, $$27, $$28, $$29, $$30, $$31, $$32, $$33, $$34, $$35] <- default.lineitem
+                                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                                            exchange 
+                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                              empty-tuple-source
+                                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                    exchange 
+                                                    -- HASH_PARTITION_EXCHANGE [$$36]  |PARTITIONED|
+                                                      data-scan [$$36, $$37]<-[$$36, $$37, $$38, $$39, $$40, $$41, $$42, $$43, $$44] <- default.orders
+                                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                                        exchange 
+                                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                          empty-tuple-source
+                                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                            exchange 
+                                            -- HASH_PARTITION_EXCHANGE [$$12]  |PARTITIONED|
+                                              data-scan [$$12, $$15]<-[$$12, $$13, $$14, $$15, $$16, $$17, $$18, $$19] <- default.customer
+                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                exchange 
+                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                  empty-tuple-source
+                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$5]  |PARTITIONED|
+                                      data-scan [$$5, $$8]<-[$$5, $$6, $$7, $$8, $$9, $$10, $$11] <- default.supplier
+                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          empty-tuple-source
+                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                            exchange 
+                            -- HASH_PARTITION_EXCHANGE [$$4, $$3]  |PARTITIONED|
+                              data-scan [$$4, $$3, $$1, $$2]<-[$$1, $$2, $$3, $$4] <- default.q7_volume_shipping_tmp
+                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  empty-tuple-source
+                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
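
This fixture holds two statements: the first (a UNION_ALL over two nested-loop joins of nation with itself) evidently builds the valid (supp_nation, cust_nation) pairs for GERMANY/FRANCE, and the second aggregates revenue per nation pair and shipping year against default.q7_volume_shipping_tmp. A rough sketch of the second statement, with names mapped from the TPC-H schema and the tmp table's column names hypothetical:

    -- Rough shape of the aggregation statement; tmp column names are hypothetical.
    SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue
    FROM (SELECT t.supp_nation, t.cust_nation,
                 year(l_shipdate) AS l_year,
                 l_extendedprice * (1 - l_discount) AS volume
          FROM q7_volume_shipping_tmp t
          JOIN supplier s ON s.s_nationkey = t.supp_key      -- supp_key/cust_key hypothetical
          JOIN lineitem l ON l.l_suppkey = s.s_suppkey
          JOIN orders o   ON o.o_orderkey = l.l_orderkey
          JOIN customer c ON c.c_custkey = o.o_custkey AND c.c_nationkey = t.cust_key
          WHERE l_shipdate >= '1995-01-01' AND l_shipdate <= '1996-12-31') shipping
    GROUP BY supp_nation, cust_nation, l_year
    ORDER BY supp_nation, cust_nation, l_year;
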
diff --git a/hivesterix/src/test/resources/optimizerts/results/q8_national_market_share.plan b/hivesterix/src/test/resources/optimizerts/results/q8_national_market_share.plan
new file mode 100644
index 0000000..b807a24
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q8_national_market_share.plan
@@ -0,0 +1,190 @@
+write [%0->$$63, %0->$$66]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- SORT_MERGE_EXCHANGE [$$63(ASC) ]  |PARTITIONED|
+    order (ASC, %0->$$63) 
+    -- STABLE_SORT [$$63(ASC)]  |LOCAL|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        project ([$$63, $$66])
+        -- STREAM_PROJECT  |PARTITIONED|
+          assign [$$66] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPDivide, Args:[%0->$$64, %0->$$65]]
+          -- ASSIGN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              group by ([$$63 := %0->$$70]) decor ([]) {
+                        aggregate [$$64, $$65] <- [function-call: hive:sum(FINAL), Args:[%0->$$68], function-call: hive:sum(FINAL), Args:[%0->$$69]]
+                        -- AGGREGATE  |LOCAL|
+                          nested tuple source
+                          -- NESTED_TUPLE_SOURCE  |LOCAL|
+                     }
+              -- EXTERNAL_GROUP_BY[$$70]  |PARTITIONED|
+                exchange 
+                -- HASH_PARTITION_EXCHANGE [$$70]  |PARTITIONED|
+                  group by ([$$70 := %0->$$61]) decor ([]) {
+                            aggregate [$$68, $$69] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen, Args:[function-call: algebricks:eq, Args:[%0->$$2, BRAZIL], %0->$$62, 0.0]], function-call: hive:sum(PARTIAL1), Args:[%0->$$62]]
+                            -- AGGREGATE  |LOCAL|
+                              nested tuple source
+                              -- NESTED_TUPLE_SOURCE  |LOCAL|
+                         }
+                  -- EXTERNAL_GROUP_BY[$$61]  |LOCAL|
+                    exchange 
+                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                      project ([$$61, $$62, $$2])
+                      -- STREAM_PROJECT  |PARTITIONED|
+                        assign [$$61, $$62] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFYear, Args:[%0->$$41], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$26, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$27]]]
+                        -- ASSIGN  |PARTITIONED|
+                          project ([$$2, $$41, $$27, $$26])
+                          -- STREAM_PROJECT  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              join (function-call: algebricks:eq, Args:[%0->$$8, %0->$$1])
+                              -- HYBRID_HASH_JOIN [$$8][$$1]  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$8]  |PARTITIONED|
+                                  project ([$$8, $$41, $$27, $$26])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    exchange 
+                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                      join (function-call: algebricks:eq, Args:[%0->$$23, %0->$$5])
+                                      -- HYBRID_HASH_JOIN [$$23][$$5]  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$23]  |PARTITIONED|
+                                          project ([$$41, $$27, $$26, $$23])
+                                          -- STREAM_PROJECT  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              join (function-call: algebricks:eq, Args:[%0->$$22, %0->$$12])
+                                              -- HYBRID_HASH_JOIN [$$22][$$12]  |PARTITIONED|
+                                                exchange 
+                                                -- HASH_PARTITION_EXCHANGE [$$22]  |PARTITIONED|
+                                                  project ([$$41, $$22, $$23, $$26, $$27])
+                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                    exchange 
+                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                      join (function-call: algebricks:eq, Args:[%0->$$21, %0->$$37])
+                                                      -- HYBRID_HASH_JOIN [$$21][$$37]  |PARTITIONED|
+                                                        exchange 
+                                                        -- HASH_PARTITION_EXCHANGE [$$21]  |PARTITIONED|
+                                                          data-scan [$$21, $$22, $$23, $$26, $$27]<-[$$21, $$22, $$23, $$24, $$25, $$26, $$27, $$28, $$29, $$30, $$31, $$32, $$33, $$34, $$35, $$36] <- default.lineitem
+                                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                                            exchange 
+                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                              empty-tuple-source
+                                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                        exchange 
+                                                        -- HASH_PARTITION_EXCHANGE [$$37]  |PARTITIONED|
+                                                          project ([$$37, $$41])
+                                                          -- STREAM_PROJECT  |PARTITIONED|
+                                                            exchange 
+                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                              join (function-call: algebricks:eq, Args:[%0->$$46, %0->$$38])
+                                                              -- HYBRID_HASH_JOIN [$$46][$$38]  |PARTITIONED|
+                                                                exchange 
+                                                                -- HASH_PARTITION_EXCHANGE [$$46]  |PARTITIONED|
+                                                                  project ([$$46])
+                                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                                    exchange 
+                                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                      join (function-call: algebricks:eq, Args:[%0->$$54, %0->$$49])
+                                                                      -- HYBRID_HASH_JOIN [$$54][$$49]  |PARTITIONED|
+                                                                        exchange 
+                                                                        -- HASH_PARTITION_EXCHANGE [$$54]  |PARTITIONED|
+                                                                          project ([$$54])
+                                                                          -- STREAM_PROJECT  |PARTITIONED|
+                                                                            exchange 
+                                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                              join (function-call: algebricks:eq, Args:[%0->$$58, %0->$$56])
+                                                                              -- HYBRID_HASH_JOIN [$$58][$$56]  |PARTITIONED|
+                                                                                exchange 
+                                                                                -- HASH_PARTITION_EXCHANGE [$$58]  |PARTITIONED|
+                                                                                  project ([$$58])
+                                                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                                                    select (function-call: algebricks:eq, Args:[%0->$$59, AMERICA])
+                                                                                    -- STREAM_SELECT  |PARTITIONED|
+                                                                                      exchange 
+                                                                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                                        data-scan [$$59, $$58]<-[$$58, $$59, $$60] <- default.region
+                                                                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                                          exchange 
+                                                                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                                            empty-tuple-source
+                                                                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                                                exchange 
+                                                                                -- HASH_PARTITION_EXCHANGE [$$56]  |PARTITIONED|
+                                                                                  project ([$$56, $$54])
+                                                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                                                    exchange 
+                                                                                    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                                      replicate 
+                                                                                      -- SPLIT  |PARTITIONED|
+                                                                                        exchange 
+                                                                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                                          data-scan []<-[$$54, $$55, $$56, $$57] <- default.nation
+                                                                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                                            exchange 
+                                                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                                              empty-tuple-source
+                                                                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                                        exchange 
+                                                                        -- HASH_PARTITION_EXCHANGE [$$49]  |PARTITIONED|
+                                                                          data-scan [$$49, $$46]<-[$$46, $$47, $$48, $$49, $$50, $$51, $$52, $$53] <- default.customer
+                                                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                            exchange 
+                                                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                              empty-tuple-source
+                                                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                                exchange 
+                                                                -- HASH_PARTITION_EXCHANGE [$$38]  |PARTITIONED|
+                                                                  project ([$$38, $$37, $$41])
+                                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                                    select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$41, 1996-12-31], function-call: algebricks:ge, Args:[%0->$$41, 1995-01-01]])
+                                                                    -- STREAM_SELECT  |PARTITIONED|
+                                                                      exchange 
+                                                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                        data-scan [$$38, $$37, $$41]<-[$$37, $$38, $$39, $$40, $$41, $$42, $$43, $$44, $$45] <- default.orders
+                                                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                          exchange 
+                                                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                            empty-tuple-source
+                                                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                exchange 
+                                                -- HASH_PARTITION_EXCHANGE [$$12]  |PARTITIONED|
+                                                  project ([$$12])
+                                                  -- STREAM_PROJECT  |PARTITIONED|
+                                                    select (function-call: algebricks:eq, Args:[%0->$$16, ECONOMY ANODIZED STEEL])
+                                                    -- STREAM_SELECT  |PARTITIONED|
+                                                      exchange 
+                                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                        data-scan [$$16, $$12]<-[$$12, $$13, $$14, $$15, $$16, $$17, $$18, $$19, $$20] <- default.part
+                                                        -- DATASOURCE_SCAN  |PARTITIONED|
+                                                          exchange 
+                                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                            empty-tuple-source
+                                                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                        exchange 
+                                        -- HASH_PARTITION_EXCHANGE [$$5]  |PARTITIONED|
+                                          data-scan [$$5, $$8]<-[$$5, $$6, $$7, $$8, $$9, $$10, $$11] <- default.supplier
+                                          -- DATASOURCE_SCAN  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              empty-tuple-source
+                                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                exchange 
+                                -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                                  project ([$$1, $$2])
+                                  -- STREAM_PROJECT  |PARTITIONED|
+                                    assign [$$1, $$2, $$3, $$4] <- [%0->$$54, %0->$$55, %0->$$56, %0->$$57]
+                                    -- ASSIGN  |UNPARTITIONED|
+                                      exchange 
+                                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                        replicate 
+                                        -- SPLIT  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            data-scan []<-[$$54, $$55, $$56, $$57] <- default.nation
+                                            -- DATASOURCE_SCAN  |PARTITIONED|
+                                              exchange 
+                                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                empty-tuple-source
+                                                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
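
This is TPC-H Q8 (national market share): the CASE expression survives into the partial aggregate as GenericUDFWhen, both sums travel through a single grouped aggregation, and only the final ASSIGN divides them. A sketch, with names again assumed from the TPC-H schema; the BRAZIL / AMERICA / ECONOMY ANODIZED STEEL constants and date bounds are taken directly from the plan:

    SELECT o_year,
           sum(CASE WHEN nation = 'BRAZIL' THEN volume ELSE 0.0 END) / sum(volume) AS mkt_share
    FROM (SELECT year(o_orderdate) AS o_year,
                 l_extendedprice * (1 - l_discount) AS volume,
                 n2.n_name AS nation
          FROM part p
          JOIN lineitem l ON p.p_partkey = l.l_partkey
          JOIN supplier s ON s.s_suppkey = l.l_suppkey
          JOIN orders o   ON o.o_orderkey = l.l_orderkey
          JOIN customer c ON c.c_custkey = o.o_custkey
          JOIN nation n1  ON n1.n_nationkey = c.c_nationkey
          JOIN region r   ON r.r_regionkey = n1.n_regionkey
          JOIN nation n2  ON n2.n_nationkey = s.s_nationkey
          WHERE r.r_name = 'AMERICA'
            AND p.p_type = 'ECONOMY ANODIZED STEEL'
            AND o_orderdate >= '1995-01-01' AND o_orderdate < '1996-12-31') all_nations
    GROUP BY o_year
    ORDER BY o_year;
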
diff --git a/hivesterix/src/test/resources/optimizerts/results/q9_product_type_profit.plan b/hivesterix/src/test/resources/optimizerts/results/q9_product_type_profit.plan
new file mode 100644
index 0000000..f57f4a3
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/q9_product_type_profit.plan
@@ -0,0 +1,124 @@
+write [%0->$$53, %0->$$54, %0->$$55]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$53, $$54, $$55])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- SORT_MERGE_EXCHANGE [$$53(ASC), $$54(DESC) ]  |PARTITIONED|
+      order (ASC, %0->$$53) (DESC, %0->$$54) 
+      -- STABLE_SORT [$$53(ASC), $$54(DESC)]  |LOCAL|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          group by ([$$53 := %0->$$58; $$54 := %0->$$59]) decor ([]) {
+                    aggregate [$$55] <- [function-call: hive:sum(FINAL), Args:[%0->$$57]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$58, $$59]  |PARTITIONED|
+            exchange 
+            -- HASH_PARTITION_EXCHANGE [$$58, $$59]  |PARTITIONED|
+              group by ([$$58 := %0->$$48; $$59 := %0->$$51]) decor ([]) {
+                        aggregate [$$57] <- [function-call: hive:sum(PARTIAL1), Args:[%0->$$52]]
+                        -- AGGREGATE  |LOCAL|
+                          nested tuple source
+                          -- NESTED_TUPLE_SOURCE  |LOCAL|
+                     }
+              -- EXTERNAL_GROUP_BY[$$48, $$51]  |LOCAL|
+                exchange 
+                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                  project ([$$48, $$51, $$52])
+                  -- STREAM_PROJECT  |PARTITIONED|
+                    assign [$$51, $$52] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFYear, Args:[%0->$$5], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$29, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$30]], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$22, %0->$$28]]]
+                    -- ASSIGN  |PARTITIONED|
+                      project ([$$5, $$29, $$30, $$28, $$48, $$22])
+                      -- STREAM_PROJECT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          join (function-call: algebricks:eq, Args:[%0->$$24, %0->$$1])
+                          -- HYBRID_HASH_JOIN [$$24][$$1]  |PARTITIONED|
+                            exchange 
+                            -- HASH_PARTITION_EXCHANGE [$$24]  |PARTITIONED|
+                              project ([$$29, $$30, $$28, $$24, $$48, $$22])
+                              -- STREAM_PROJECT  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  join (function-call: algebricks:eq, Args:[%0->$$25, %0->$$10])
+                                  -- HYBRID_HASH_JOIN [$$25][$$10]  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$25]  |PARTITIONED|
+                                      project ([$$22, $$29, $$30, $$28, $$25, $$24, $$48])
+                                      -- STREAM_PROJECT  |PARTITIONED|
+                                        exchange 
+                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                          join (function-call: algebricks:and, Args:[function-call: algebricks:eq, Args:[%0->$$26, %0->$$20], function-call: algebricks:eq, Args:[%0->$$25, %0->$$19]])
+                                          -- HYBRID_HASH_JOIN [$$26, $$25][$$20, $$19]  |PARTITIONED|
+                                            exchange 
+                                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                              project ([$$48, $$24, $$25, $$26, $$28, $$29, $$30])
+                                              -- STREAM_PROJECT  |PARTITIONED|
+                                                exchange 
+                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                  join (function-call: algebricks:eq, Args:[%0->$$26, %0->$$40])
+                                                  -- HYBRID_HASH_JOIN [$$26][$$40]  |PARTITIONED|
+                                                    exchange 
+                                                    -- HASH_PARTITION_EXCHANGE [$$26]  |PARTITIONED|
+                                                      data-scan [$$26, $$24, $$25, $$28, $$29, $$30]<-[$$24, $$25, $$26, $$27, $$28, $$29, $$30, $$31, $$32, $$33, $$34, $$35, $$36, $$37, $$38, $$39] <- default.lineitem
+                                                      -- DATASOURCE_SCAN  |PARTITIONED|
+                                                        exchange 
+                                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                          empty-tuple-source
+                                                          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                    exchange 
+                                                    -- HASH_PARTITION_EXCHANGE [$$40]  |PARTITIONED|
+                                                      project ([$$40, $$48])
+                                                      -- STREAM_PROJECT  |PARTITIONED|
+                                                        exchange 
+                                                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                          join (function-call: algebricks:eq, Args:[%0->$$43, %0->$$47])
+                                                          -- HYBRID_HASH_JOIN [$$43][$$47]  |PARTITIONED|
+                                                            exchange 
+                                                            -- HASH_PARTITION_EXCHANGE [$$43]  |PARTITIONED|
+                                                              data-scan [$$43, $$40]<-[$$40, $$41, $$42, $$43, $$44, $$45, $$46] <- default.supplier
+                                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                exchange 
+                                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                  empty-tuple-source
+                                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                                            exchange 
+                                                            -- HASH_PARTITION_EXCHANGE [$$47]  |PARTITIONED|
+                                                              data-scan [$$47, $$48]<-[$$47, $$48, $$49, $$50] <- default.nation
+                                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                                exchange 
+                                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                                  empty-tuple-source
+                                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                            exchange 
+                                            -- HASH_PARTITION_EXCHANGE [$$20]  |PARTITIONED|
+                                              data-scan [$$20, $$19, $$22]<-[$$19, $$20, $$21, $$22, $$23] <- default.partsupp
+                                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                                exchange 
+                                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                  empty-tuple-source
+                                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                                    exchange 
+                                    -- HASH_PARTITION_EXCHANGE [$$10]  |PARTITIONED|
+                                      project ([$$10])
+                                      -- STREAM_PROJECT  |PARTITIONED|
+                                        select (function-call: hive:org.apache.hadoop.hive.ql.udf.UDFLike, Args:[%0->$$11, %green%])
+                                        -- STREAM_SELECT  |PARTITIONED|
+                                          exchange 
+                                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                            data-scan [$$10, $$11]<-[$$10, $$11, $$12, $$13, $$14, $$15, $$16, $$17, $$18] <- default.part
+                                            -- DATASOURCE_SCAN  |PARTITIONED|
+                                              exchange 
+                                              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                                empty-tuple-source
+                                                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                            exchange 
+                            -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+                              data-scan [$$1, $$5]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.orders
+                              -- DATASOURCE_SCAN  |PARTITIONED|
+                                exchange 
+                                -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                                  empty-tuple-source
+                                  -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
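
TPC-H Q9 (product type profit): per-line profit is l_extendedprice*(1-l_discount) minus ps_supplycost*l_quantity, grouped by supplier nation and order year and sorted nation ASC, year DESC, exactly as the STABLE_SORT directions show. A sketch under the standard schema:

    SELECT nation, o_year, sum(amount) AS sum_profit
    FROM (SELECT n_name AS nation,
                 year(o_orderdate) AS o_year,
                 l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity AS amount
          FROM orders o
          JOIN lineitem l  ON l.l_orderkey = o.o_orderkey
          JOIN partsupp ps ON ps.ps_suppkey = l.l_suppkey AND ps.ps_partkey = l.l_partkey
          JOIN part p      ON p.p_partkey = l.l_partkey
          JOIN supplier s  ON s.s_suppkey = l.l_suppkey
          JOIN nation n    ON n.n_nationkey = s.s_nationkey
          WHERE p.p_name LIKE '%green%') profit
    GROUP BY nation, o_year
    ORDER BY nation ASC, o_year DESC;
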
diff --git a/hivesterix/src/test/resources/optimizerts/results/u10_nestedloop_join.plan b/hivesterix/src/test/resources/optimizerts/results/u10_nestedloop_join.plan
new file mode 100644
index 0000000..c86d57f
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u10_nestedloop_join.plan
@@ -0,0 +1,24 @@
+write [%0->$$6, %0->$$2, %0->$$5, %0->$$1]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$6, $$2, $$5, $$1])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      join (function-call: algebricks:and, Args:[function-call: algebricks:gt, Args:[%0->$$5, %0->$$1], true])
+      -- NESTED_LOOP  |PARTITIONED|
+        exchange 
+        -- BROADCAST_EXCHANGE  |PARTITIONED|
+          data-scan [$$1, $$2]<-[$$1, $$2, $$3, $$4] <- default.nation
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          data-scan [$$5, $$6]<-[$$5, $$6, $$7, $$8] <- default.nation
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
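
The point of this fixture is the join strategy: a greater-than predicate cannot be hash-partitioned, so the plan falls back to NESTED_LOOP with one input broadcast. A minimal query of this shape (nation self-join; names assumed):

    SELECT b.n_name, a.n_name, b.n_nationkey, a.n_nationkey
    FROM nation a, nation b
    WHERE b.n_nationkey > a.n_nationkey;
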
diff --git a/hivesterix/src/test/resources/optimizerts/results/u1_group_by.plan b/hivesterix/src/test/resources/optimizerts/results/u1_group_by.plan
new file mode 100644
index 0000000..188aa6d
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u1_group_by.plan
@@ -0,0 +1,36 @@
+write [%0->$$18, %0->$$19]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$18, $$19])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      group by ([$$17 := %0->$$23]) decor ([]) {
+                aggregate [$$18, $$19] <- [function-call: hive:sum(FINAL), Args:[%0->$$21], function-call: hive:sum(FINAL), Args:[%0->$$22]]
+                -- AGGREGATE  |LOCAL|
+                  nested tuple source
+                  -- NESTED_TUPLE_SOURCE  |LOCAL|
+             }
+      -- EXTERNAL_GROUP_BY[$$23]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$23]  |PARTITIONED|
+          group by ([$$23 := %0->$$9]) decor ([]) {
+                    aggregate [$$21, $$22] <- [function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFAbs, Args:[%0->$$5]], function-call: hive:sum(PARTIAL1), Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$6, function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMinus, Args:[1, %0->$$7]], function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPPlus, Args:[1, %0->$$8]]]]
+                    -- AGGREGATE  |LOCAL|
+                      nested tuple source
+                      -- NESTED_TUPLE_SOURCE  |LOCAL|
+                 }
+          -- EXTERNAL_GROUP_BY[$$9]  |LOCAL|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              project ([$$9, $$5, $$6, $$7, $$8])
+              -- STREAM_PROJECT  |PARTITIONED|
+                select (function-call: algebricks:le, Args:[%0->$$11, 1998-09-02])
+                -- STREAM_SELECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    data-scan [$$5, $$6, $$7, $$8, $$9, $$11]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+                    -- DATASOURCE_SCAN  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        empty-tuple-source
+                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
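
Here the group key ($$9, presumably l_returnflag) is hashed on but projected away before the SINK_WRITE, so the query selects only the two aggregates. A sketch under the standard lineitem column order:

    SELECT sum(abs(l_quantity)),
           sum(l_extendedprice * (1 - l_discount) * (1 + l_tax))
    FROM lineitem
    WHERE l_shipdate <= '1998-09-02'
    GROUP BY l_returnflag;
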
diff --git a/hivesterix/src/test/resources/optimizerts/results/u2_select-project.plan b/hivesterix/src/test/resources/optimizerts/results/u2_select-project.plan
new file mode 100644
index 0000000..4485b36
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u2_select-project.plan
@@ -0,0 +1,16 @@
+write [%0->$$8, %0->$$3, %0->$$4, %0->$$2]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$8, $$3, $$4, $$2])
+  -- STREAM_PROJECT  |PARTITIONED|
+    assign [$$8] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[2, %0->$$1]]
+    -- ASSIGN  |PARTITIONED|
+      select (function-call: algebricks:lt, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$1, 2], 20])
+      -- STREAM_SELECT  |PARTITIONED|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          data-scan []<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7] <- default.supplier
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
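
A pure select-project pipeline: note that the ASSIGN computes the doubled key while the STREAM_SELECT repeats the multiplication rather than reusing it. Roughly (names assumed):

    SELECT 2 * s_suppkey, s_address, s_nationkey, s_name
    FROM supplier
    WHERE s_suppkey * 2 < 20;
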
diff --git a/hivesterix/src/test/resources/optimizerts/results/u3_union.plan b/hivesterix/src/test/resources/optimizerts/results/u3_union.plan
new file mode 100644
index 0000000..c4040f2
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u3_union.plan
@@ -0,0 +1,38 @@
+write [%0->$$17, %0->$$18, %0->$$19, %0->$$20]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+    union ($$8, $$16, $$17) ($$3, $$11, $$18) ($$4, $$12, $$19) ($$2, $$10, $$20)
+    -- UNION_ALL  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        project ([$$8, $$3, $$4, $$2])
+        -- STREAM_PROJECT  |PARTITIONED|
+          assign [$$8] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[2, %0->$$1]]
+          -- ASSIGN  |PARTITIONED|
+            select (function-call: algebricks:gt, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$1, 2], 50])
+            -- STREAM_SELECT  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                data-scan []<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7] <- default.supplier
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        project ([$$16, $$11, $$12, $$10])
+        -- STREAM_PROJECT  |PARTITIONED|
+          assign [$$16] <- [function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[2, %0->$$9]]
+          -- ASSIGN  |PARTITIONED|
+            select (function-call: algebricks:lt, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$9, 2], 20])
+            -- STREAM_SELECT  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                data-scan []<-[$$9, $$10, $$11, $$12, $$13, $$14, $$15] <- default.supplier
+                -- DATASOURCE_SCAN  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    empty-tuple-source
+                    -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
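
Both UNION_ALL branches scan the same supplier table and compute the same doubled key; only the filter constant differs. In the Hive dialect of this era a UNION ALL had to sit inside a subquery, so the source is roughly:

    SELECT * FROM (
      SELECT 2 * s_suppkey AS k, s_address, s_nationkey, s_name
      FROM supplier WHERE s_suppkey * 2 > 50
      UNION ALL
      SELECT 2 * s_suppkey AS k, s_address, s_nationkey, s_name
      FROM supplier WHERE s_suppkey * 2 < 20
    ) u;
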
diff --git a/hivesterix/src/test/resources/optimizerts/results/u4_join.plan b/hivesterix/src/test/resources/optimizerts/results/u4_join.plan
new file mode 100644
index 0000000..449a601
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u4_join.plan
@@ -0,0 +1,36 @@
+write [%0->$$1, %0->$$2, %0->$$3, %0->$$4]
+-- SINK_WRITE  |PARTITIONED|
+  select (function-call: algebricks:lt, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$1, 2], 20])
+  -- STREAM_SELECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      data-scan [$$1, $$2, $$3, $$4]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7] <- default.supplier
+      -- DATASOURCE_SCAN  |PARTITIONED|
+        exchange 
+        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+          empty-tuple-source
+          -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+write [%0->$$1, %0->$$9, %0->$$10, %0->$$11]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$1, $$9, $$10, $$11])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      join (function-call: algebricks:eq, Args:[%0->$$5, %0->$$1])
+      -- HYBRID_HASH_JOIN [$$5][$$1]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$5]  |PARTITIONED|
+          data-scan [$$5, $$9, $$10, $$11]<-[$$5, $$6, $$7, $$8, $$9, $$10, $$11] <- default.supplier
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+          data-scan [$$1]<-[$$1, $$2, $$3, $$4] <- default.result
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
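
This fixture contains two statements: a filtered projection of supplier, then a hash join of supplier against default.result on the supplier key. Whether the first statement populates default.result is not recorded in the plan; a hypothetical two-statement script of this shape (result's column name is invented for illustration):

    INSERT OVERWRITE TABLE result
    SELECT s_suppkey, s_name, s_address, s_nationkey
    FROM supplier WHERE s_suppkey * 2 < 20;

    SELECT r.r_suppkey, s.s_phone, s.s_acctbal, s.s_comment   -- r_suppkey is hypothetical
    FROM supplier s JOIN result r ON s.s_suppkey = r.r_suppkey;
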
diff --git a/hivesterix/src/test/resources/optimizerts/results/u5_lateral_view.plan b/hivesterix/src/test/resources/optimizerts/results/u5_lateral_view.plan
new file mode 100644
index 0000000..48e624e
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u5_lateral_view.plan
@@ -0,0 +1,14 @@
+write [%0->$$2, %0->$$3, %0->$$8]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$2, $$3, $$8])
+  -- STREAM_PROJECT  |PARTITIONED|
+    unnest $$8 <- function-call: hive:explode, Args:[%0->$$1]
+    -- UNNEST  |PARTITIONED|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        data-scan [$$1, $$2, $$3]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7] <- default.supplier
+        -- DATASOURCE_SCAN  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            empty-tuple-source
+            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
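
The UNNEST over hive:explode is the physical form of a LATERAL VIEW. Since explode applies to column 1 of this test's supplier table, that column must be an array here, so the schema is not the stock TPC-H one; a hypothetical query of the same shape:

    -- Hypothetical: assumes col1 is an array column in this test's supplier table.
    SELECT s.col2, s.col3, e.item
    FROM supplier s LATERAL VIEW explode(s.col1) e AS item;
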
diff --git a/hivesterix/src/test/resources/optimizerts/results/u6_limit.plan b/hivesterix/src/test/resources/optimizerts/results/u6_limit.plan
new file mode 100644
index 0000000..b5ed12f
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u6_limit.plan
@@ -0,0 +1,26 @@
+write [%0->$$1, %0->$$4, %0->$$5, %0->$$7]
+-- SINK_WRITE  |UNPARTITIONED|
+  limit 4
+  -- STREAM_LIMIT  |UNPARTITIONED|
+    limit 4
+    -- STREAM_LIMIT  |UNPARTITIONED|
+      exchange 
+      -- SORT_MERGE_EXCHANGE [$$4(ASC) ]  |PARTITIONED|
+        limit 4
+        -- STREAM_LIMIT  |LOCAL|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |LOCAL|
+            order (ASC, %0->$$4) 
+            -- STABLE_SORT [$$4(ASC)]  |LOCAL|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                select (function-call: algebricks:lt, Args:[%0->$$4, 10000])
+                -- STREAM_SELECT  |PARTITIONED|
+                  exchange 
+                  -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                    data-scan [$$1, $$4, $$5, $$7]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9] <- default.orders
+                    -- DATASOURCE_SCAN  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        empty-tuple-source
+                        -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
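
The LIMIT is applied three times: once LOCAL below the SORT_MERGE_EXCHANGE so each partition ships at most four rows, then twice more after the merge. The query shape, with names assumed from the orders schema:

    SELECT o_orderkey, o_totalprice, o_orderdate, o_clerk
    FROM orders
    WHERE o_totalprice < 10000
    ORDER BY o_totalprice
    LIMIT 4;
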
diff --git a/hivesterix/src/test/resources/optimizerts/results/u7_multi_join.plan b/hivesterix/src/test/resources/optimizerts/results/u7_multi_join.plan
new file mode 100644
index 0000000..ab55181
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u7_multi_join.plan
@@ -0,0 +1,52 @@
+write [%0->$$4, %0->$$25, %0->$$28, %0->$$29, %0->$$32]
+-- SINK_WRITE  |PARTITIONED|
+  project ([$$4, $$25, $$28, $$29, $$32])
+  -- STREAM_PROJECT  |PARTITIONED|
+    exchange 
+    -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+      join (function-call: algebricks:eq, Args:[%0->$$1, %0->$$25])
+      -- HYBRID_HASH_JOIN [$$1][$$25]  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$1]  |PARTITIONED|
+          data-scan [$$1, $$4]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7, $$8, $$9, $$10, $$11, $$12, $$13, $$14, $$15, $$16] <- default.lineitem
+          -- DATASOURCE_SCAN  |PARTITIONED|
+            exchange 
+            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+              empty-tuple-source
+              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+        exchange 
+        -- HASH_PARTITION_EXCHANGE [$$25]  |PARTITIONED|
+          project ([$$32, $$25, $$29, $$28])
+          -- STREAM_PROJECT  |PARTITIONED|
+            project ([$$25, $$17, $$28, $$29, $$32])
+            -- STREAM_PROJECT  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                join (function-call: algebricks:eq, Args:[%0->$$26, %0->$$17])
+                -- HYBRID_HASH_JOIN [$$26][$$17]  |PARTITIONED|
+                  exchange 
+                  -- HASH_PARTITION_EXCHANGE [$$26]  |PARTITIONED|
+                    select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$28, 30000], function-call: algebricks:lt, Args:[%0->$$28, 30000]])
+                    -- STREAM_SELECT  |PARTITIONED|
+                      exchange 
+                      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                        data-scan [$$32, $$25, $$26, $$29, $$28]<-[$$25, $$26, $$27, $$28, $$29, $$30, $$31, $$32, $$33] <- default.orders
+                        -- DATASOURCE_SCAN  |PARTITIONED|
+                          exchange 
+                          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                            empty-tuple-source
+                            -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
+                  exchange 
+                  -- HASH_PARTITION_EXCHANGE [$$17]  |PARTITIONED|
+                    project ([$$17])
+                    -- STREAM_PROJECT  |PARTITIONED|
+                      select (function-call: algebricks:and, Args:[function-call: algebricks:lt, Args:[%0->$$17, 5], function-call: algebricks:lt, Args:[%0->$$17, 5]])
+                      -- STREAM_SELECT  |PARTITIONED|
+                        exchange 
+                        -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                          data-scan [$$17]<-[$$17, $$18, $$19, $$20, $$21, $$22, $$23, $$24] <- default.customer
+                          -- DATASOURCE_SCAN  |PARTITIONED|
+                            exchange 
+                            -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                              empty-tuple-source
+                              -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
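
A three-way join: customer is filtered on its key, joined to orders (filtered on o_totalprice), and the result joined to lineitem on the order key. The doubled lt predicates in both STREAM_SELECTs are, like in q6, the optimizer's own duplication and belong to the expected output. Sketch (names assumed):

    SELECT l.l_linenumber, o.o_orderkey, o.o_totalprice, o.o_orderdate, o.o_shippriority
    FROM customer c
    JOIN orders o   ON o.o_custkey = c.c_custkey
    JOIN lineitem l ON l.l_orderkey = o.o_orderkey
    WHERE o.o_totalprice < 30000 AND c.c_custkey < 5;
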
diff --git a/hivesterix/src/test/resources/optimizerts/results/u8_non_mapred.plan b/hivesterix/src/test/resources/optimizerts/results/u8_non_mapred.plan
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u8_non_mapred.plan
diff --git a/hivesterix/src/test/resources/optimizerts/results/u9_order_by.plan b/hivesterix/src/test/resources/optimizerts/results/u9_order_by.plan
new file mode 100644
index 0000000..7370fcf
--- /dev/null
+++ b/hivesterix/src/test/resources/optimizerts/results/u9_order_by.plan
@@ -0,0 +1,18 @@
+write [%0->$$1, %0->$$2, %0->$$3, %0->$$4]
+-- SINK_WRITE  |PARTITIONED|
+  exchange 
+  -- SORT_MERGE_EXCHANGE [$$2(ASC) ]  |PARTITIONED|
+    order (ASC, %0->$$2) 
+    -- STABLE_SORT [$$2(ASC)]  |LOCAL|
+      exchange 
+      -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+        select (function-call: algebricks:lt, Args:[function-call: hive:org.apache.hadoop.hive.ql.udf.UDFOPMultiply, Args:[%0->$$1, 2], 20])
+        -- STREAM_SELECT  |PARTITIONED|
+          exchange 
+          -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+            data-scan [$$1, $$2, $$3, $$4]<-[$$1, $$2, $$3, $$4, $$5, $$6, $$7] <- default.supplier
+            -- DATASOURCE_SCAN  |PARTITIONED|
+              exchange 
+              -- ONE_TO_ONE_EXCHANGE  |PARTITIONED|
+                empty-tuple-source
+                -- EMPTY_TUPLE_SOURCE  |PARTITIONED|
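
Same filtered supplier scan as u2, now with a global sort: each partition sorts locally (STABLE_SORT |LOCAL|) and the SORT_MERGE_EXCHANGE merges the sorted runs. Roughly (names assumed):

    SELECT s_suppkey, s_name, s_address, s_nationkey
    FROM supplier
    WHERE s_suppkey * 2 < 20
    ORDER BY s_name;
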