<?xml version='1.0' encoding='UTF-8'?><?xml-stylesheet href="http://www.blogger.com/styles/atom.css" type="text/css"?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:blogger='http://schemas.google.com/blogger/2008' xmlns:georss='http://www.georss.org/georss' xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr='http://purl.org/syndication/thread/1.0'><id>tag:blogger.com,1999:blog-4025024079216879898</id><updated>2026-03-26T13:17:51.544+05:30</updated><category term="ETL"/><category term="OLAP"/><category term="SQL"/><category term="DOLAP"/><category term="MOLAP"/><category term="ODS"/><category term="OLTP"/><category term="SOLAP"/><category term="WOLAP"/><category term="Informatica"/><category term="COGNOS"/><category term="DBA"/><category term="informatica power center"/><category term="BI"/><category term="Informatica 9"/><category term="Oracle"/><category term="SOA"/><category term="data warehousing"/><category term="lookup transformation"/><category term="Business Intelligence"/><category term="DWh"/><category term="ETL Tool"/><category term="Email-Task"/><category term="Failure-Email"/><category term="Fast Clone"/><category term="IBM"/><category term="MIS"/><category term="SAS"/><category term="Session-Notification"/><category term="Success-Email"/><category term="Teradata"/><category term="Workflow-Notification"/><category term="cahe lookup"/><category term="cloud infrastructure"/><category term="domain"/><category term="flat files"/><category term="informatica 8.x"/><category term="metadata"/><category term="power center"/><category term="reporting"/><category term="sql overrde"/><category term="warehouse"/><category term="Ab Initio"/><category term="Application Services"/><category term="B2B"/><category term="Bombay Stock Exchang"/><category term="CDI"/><category term="CRM"/><category term="DB"/><category term="DB2"/><category term="DTS"/><category term="Daastage"/><category term="Data Cleansing"/><category term="Data Integration"/><category term="Data Modelers"/><category term="DataStage"/><category term="Database Tuning"/><category term="Datacom"/><category term="ETL Performance Tuning"/><category term="FTP Server"/><category term="Graphical Mode"/><category term="HOLAP"/><category term="Hadoop"/><category term="IDMS"/><category term="IMS"/><category term="IS"/><category term="Informatica Upgrade Process"/><category term="Informatica administrator"/><category term="Informatica data quality"/><category term="MDM"/><category term="MPP"/><category term="Mainframe"/><category term="MapR"/><category term="National Stock Exchange"/><category term="New client tools"/><category term="OLTP System"/><category term="OPB"/><category term="OPB tables"/><category term="OPB_MAPPING"/><category term="OPB_SESSLOG"/><category term="OPB_SUBJECT"/><category term="OPB_TDS"/><category term="OSBI"/><category term="PIM"/><category term="Pentaho"/><category term="PowerCenter"/><category term="PowerExchange"/><category term="Pre-Upgrade Tasks"/><category term="ROLAP"/><category term="ROLLUP"/><category term="SMP"/><category term="SQL Transformation"/><category term="Security"/><category term="Server"/><category term="Service Oriented Architecture"/><category term="Session logs size"/><category term="Surrogate Key"/><category term="Talend"/><category term="Ultra Messaging"/><category term="Uncategorized"/><category term="VSAM"/><category term="WebSphere"/><category term="XML"/><category term="XML Transformation"/><category term="XML 
parser"/><category term="XSD"/><category term="administration console"/><category term="analysis"/><category term="cached lookup"/><category term="cube"/><category term="currval"/><category term="data"/><category term="data mart"/><category term="data masking"/><category term="data migration"/><category term="data mining"/><category term="data mirror"/><category term="data source"/><category term="development"/><category term="dynamic lookup"/><category term="dynamic partitioning"/><category term="expressions"/><category term="grid"/><category term="grid computing"/><category term="informatica 9.x.lookup transormation"/><category term="legacy systems"/><category term="lookup override"/><category term="lookup query"/><category term="master data management"/><category term="mining"/><category term="new features"/><category term="nextval"/><category term="pipeline lookuu"/><category term="power center domain.admin console"/><category term="power center services"/><category term="powerr center 9"/><category term="raw data"/><category term="repository"/><category term="sequence Generator"/><category term="sessions"/><category term="sets"/><category term="shared lookup cache"/><category term="source"/><category term="stagging area"/><category term="stock_code"/><category term="summary data"/><category term="target"/><category term="the domain"/><category term="unchaced lookup"/><title type='text'>ETL TOOLS</title><subtitle type='html'>ETL tools. List of the most popular ETL tools: Informatica, DataStage, BusinessObjects, Cognos, Warehouse Builder,&#xa;  AB Initio, Pentaho,  Microsoft SQL Server 2008, SAS Data Integration Studio.</subtitle><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/posts/default'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default?redirect=false'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/'/><link rel='hub' href='http://pubsubhubbub.appspot.com/'/><link rel='next' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default?start-index=26&amp;max-results=25&amp;redirect=false'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><generator version='7.00' uri='http://www.blogger.com'>Blogger</generator><openSearch:totalResults>212</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-6335579824972553243</id><published>2013-06-15T00:49:00.000+05:30</published><updated>2013-06-15T00:49:41.518+05:30</updated><category scheme="http://www.blogger.com/atom/ns#" term="Bombay Stock Exchang"/><category scheme="http://www.blogger.com/atom/ns#" term="Data Modelers"/><category scheme="http://www.blogger.com/atom/ns#" term="DBA"/><category scheme="http://www.blogger.com/atom/ns#" term="ETL"/><category scheme="http://www.blogger.com/atom/ns#" term="Informatica"/><category scheme="http://www.blogger.com/atom/ns#" term="National Stock Exchange"/><title type='text'>Informatica Tutorial</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Informatica is a widely used ETL tool for extracting source data, applying the required transformations, and loading the data into the target. In the following section, we explain the use of Informatica in a data warehouse environment with an example. We do not go into the details of data warehouse design; this tutorial simply provides an overview of how Informatica can be used as an ETL tool.&lt;br /&gt;
&lt;br /&gt;
Note: The exchanges and companies described here are for illustrative purposes only. &lt;br /&gt;
&lt;br /&gt;
Bombay Stock Exchange (BSE) and National Stock Exchange (NSE) are two major stock exchanges in India on which the shares of ABC Corporation and XYZ Private Limited are traded Monday through Friday, except on holidays. Assume that a software company, “KLXY Limited”, has taken up the project to integrate the data between the two exchanges, BSE and NSE.&lt;br /&gt;
&lt;br /&gt;
To complete this task of integrating the raw data received from NSE and BSE, KLXY Limited assigns responsibilities to data modelers, DBAs, and ETL developers. Many IT professionals may be involved in the overall ETL process, but we highlight the roles of only these three for easier understanding and better clarity.&lt;/div&gt;
&lt;ul style=&quot;line-height: 25px;&quot;&gt;
&lt;li&gt;Data modelers analyze the data from the two sources (Record Layout 1 &amp;amp; Record Layout 2), design the data models, and then generate scripts to create the necessary tables and the corresponding records.&lt;/li&gt;
&lt;li&gt;DBAs create the databases and tables based on the scripts 
generated by the data modelers.&lt;/li&gt;
&lt;li&gt;ETL developers map the extracted data from source systems and 
load it to target systems after applying the required 
transformations.&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/6335579824972553243/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2013/06/informatica-tutorial.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/6335579824972553243'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/6335579824972553243'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2013/06/informatica-tutorial.html' title='Informatica Tutorial'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-3525793375442171329</id><published>2013-06-15T00:45:00.000+05:30</published><updated>2013-06-15T00:45:01.012+05:30</updated><category scheme="http://www.blogger.com/atom/ns#" term="Application Services"/><category scheme="http://www.blogger.com/atom/ns#" term="domain"/><category scheme="http://www.blogger.com/atom/ns#" term="Graphical Mode"/><category scheme="http://www.blogger.com/atom/ns#" term="Informatica Upgrade Process"/><category scheme="http://www.blogger.com/atom/ns#" term="Pre-Upgrade Tasks"/><category scheme="http://www.blogger.com/atom/ns#" term="Server"/><title type='text'>Informatica Upgrade Process:</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
&lt;b&gt;&lt;span style=&quot;color: blue;&quot;&gt;Informatica Upgrade Process: &lt;/span&gt;&lt;/b&gt;&lt;br /&gt;
  &lt;b&gt;The stages of the upgrade can be categorized as below: &lt;/b&gt;&lt;br /&gt;
  &lt;ol&gt;
&lt;li&gt;&lt;b&gt;Upgrading the domain and server files:&lt;/b&gt; Run the Informatica server installer and select the upgrade option. The domain upgrade wizard installs the server files and configures the domain. If the domain has multiple nodes, you must upgrade all the nodes.&lt;/li&gt;
&lt;/ol&gt;
The following table describes the actions that the installer performs when you upgrade Informatica:&lt;br /&gt;
     &lt;br /&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; style=&quot;width: 479px;&quot;&gt;&lt;tbody&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;197&quot;&gt;           &lt;strong&gt;Tasks &lt;/strong&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;280&quot;&gt;           &lt;b&gt;Description&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;197&quot;&gt;           &lt;b&gt;1. Installs Informatica.&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;280&quot;&gt;           Installs Informatica directories and files into the new directory.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;197&quot;&gt;           &lt;b&gt;2. Copies infa_shared directory.&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;280&quot;&gt;           Copies the contents of the infa_shared directory from the existing installation directory into the new installation directory.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;197&quot;&gt;           &lt;b&gt;3. Copies mm_files directory.&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;280&quot;&gt;           Copies
 the contents of the mm_files directory from the default location in the
 existing installation directory into the new installation directory.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;197&quot;&gt;           &lt;b&gt;4. Upgrades the domain.&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;280&quot;&gt;           Upgrades the domain to run version 9.0.1 application services.&lt;br /&gt;
            The upgrade retains the user and administrator accounts in the domain.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;197&quot;&gt;           &lt;b&gt;5. Starts Informatica Services.&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;299&quot;&gt;           Starts Informatica Services on the node.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
&lt;b&gt;2. Upgrading the application services:&lt;/b&gt; After you upgrade the domain and server files, log in to the Administrator tool and upgrade the application services. The service upgrade wizard provides a list of all application services that must be upgraded. It upgrades the services in the order required by the dependent objects.&lt;br /&gt;
  &lt;b&gt;3. Upgrading the Informatica client:&lt;/b&gt; To upgrade the Informatica client, run the Informatica client installer and select the upgrade option.&lt;br /&gt;
  &lt;strong&gt;&lt;span style=&quot;color: blue;&quot;&gt;Pre-Upgrade Tasks&lt;/span&gt;&lt;/strong&gt;&lt;br /&gt;
  Before you upgrade the domain and server files, complete the following tasks:&lt;br /&gt;
  1. Review the prerequisites.&lt;br /&gt;
  2. Verify the file descriptor settings.&lt;br /&gt;
  3. Verify the configuration of the environment variables used by the installer.&lt;br /&gt;
  4. Clear the configuration of environment variables that pertain to previous installations of Informatica.&lt;br /&gt;
  5. Prepare the domain.&lt;br /&gt;
  6. Prepare the PowerCenter repository.&lt;br /&gt;
  7. Prepare the PowerCenter Profiling warehouse.&lt;br /&gt;
  8. Prepare for an upgrade from PowerCenter 8.6.1:&lt;br /&gt;
  &lt;ul&gt;
&lt;li&gt; Export Reference Table Manager Data.&lt;/li&gt;
&lt;li&gt; Prepare Metadata Manager.&lt;/li&gt;
&lt;li&gt; Prepare the Data Analyzer repository.&lt;/li&gt;
&lt;/ul&gt;
9. Shut down the domain.&lt;br /&gt;
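  As a rough illustration of how a couple of these checks might be run from a UNIX shell (the commands below are generic examples, assuming a Bourne-style shell, and are not taken from the Informatica documentation), steps 2 and 4 could be verified as follows:&lt;br /&gt;
&lt;pre&gt;
# Step 2: check the open file descriptor limit for the current user
ulimit -n

# Step 4: list environment variables left over from a previous Informatica installation
env | grep INFA
&lt;/pre&gt;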
  &lt;span style=&quot;color: blue;&quot;&gt;&lt;strong&gt;Upgrading the Domain and Server in Graphical Mode: &lt;/strong&gt;&lt;/span&gt;&lt;br /&gt;
  You can upgrade the Informatica domain and server files in graphical mode on Windows or UNIX.&lt;br /&gt;
  1. Verify that your environment meets the minimum system requirements and complete the pre-upgrade tasks.&lt;br /&gt;
  2. Log in to the machine with the same user account that you used to install the previous version.&lt;br /&gt;
  3. Close all other applications.&lt;br /&gt;
  4. To begin the upgrade on Windows, run install.bat from the root directory.&lt;br /&gt;
  To begin the upgrade on UNIX, use a shell command line to run install.sh from the root directory, and then select the option for graphical mode installation.&lt;br /&gt;
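  A minimal sketch of step 4 on a UNIX machine (the extraction path below is a placeholder, not a value from the documentation):&lt;br /&gt;
&lt;pre&gt;
# From the directory where the Informatica 9.0.1 server installer was extracted (placeholder path)
cd /opt/install/informatica_901_server
sh install.sh
# When prompted, choose the graphical mode installation option
&lt;/pre&gt;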
  5. In the Installation Type window, select Upgrade to Informatica 9.0.1 and click Next.&lt;br /&gt;
  &lt;ul&gt;
&lt;li&gt;
 The Upgrade Pre-Requisites window displays the upgrade system 
requirements. Verify that all requirements are met before you continue 
the upgrade.&lt;/li&gt;
&lt;/ul&gt;
6. Click Next.&lt;br /&gt;
  7. In the Upgrade Directory window, enter the following directories:&lt;br /&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; style=&quot;width: 479px;&quot;&gt;&lt;tbody&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;210&quot;&gt;           &lt;b&gt;Directory&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;267&quot;&gt;           &lt;b&gt;Description&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;210&quot;&gt;           1.Directory of the Informatica&lt;br /&gt;
            Product to upgrade.&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;267&quot;&gt;           Directory that contains the previous version of Power Center that you want to upgrade.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;210&quot;&gt;           2.Directory for Informatica 9.0.1&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;285&quot;&gt;           Directory in which to install Informatica 9.0.1.&lt;br /&gt;
            Enter
 the absolute path for the installation directory. The directory cannot 
be the same as the directory that contains the previous version of Power
 Center. The directory names in the path must not contain spaces or the 
following special characters: @|* $ # ! % ( ) { } [ ] , ; &#39; &lt;br /&gt;
            On Windows, the installation directory must be on the current machine.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
8. Click Next.&lt;br /&gt;
  The upgrade wizard displays a warning to shut down the Informatica domain before you continue the upgrade.&lt;br /&gt;
  9. Click OK.&lt;br /&gt;
  10. In the Pre-Installation Summary window, review the upgrade information, and click Install to continue.&lt;br /&gt;
  The upgrade wizard installs the Informatica server files to the Informatica 9.0.1 installation directory.&lt;br /&gt;
  11.
 In the Domain Configuration Upgrade window, the upgrade wizard displays
 the database and user account information for the domain configuration 
repository to be upgraded.   &lt;br /&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; style=&quot;width: 479px;&quot;&gt;&lt;tbody&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;193&quot;&gt;           &lt;b&gt;Property&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;284&quot;&gt;           &lt;b&gt;Description&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;193&quot;&gt;           &lt;b&gt;Database type&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;284&quot;&gt;           Database for the domain configuration repository.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;193&quot;&gt;           &lt;b&gt;Database user ID&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;284&quot;&gt;           Database user account for the domain configuration repository.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;193&quot;&gt;           &lt;b&gt;User password&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;284&quot;&gt;           Password for the database user account.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;193&quot;&gt;           &lt;b&gt;Tablespace&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;295&quot;&gt;           Displayed for IBM DB2 only. Name of the tablespace for the upgraded domain configuration repository tables.&lt;br /&gt;
            If
 the database of the domain configuration repository that you are 
upgrading does not use a 32 K tablespace, this property is blank. Enter 
the name of a tablespace with a page size of 32 K. In a single-partition
 database, if you do not specify a tablespace&lt;br /&gt;
            name, 
the installer writes the upgraded tables in the default tablespace. The 
default tablespace must be 32 K. In a multi-partition database, you must
 specify a 32 K tablespace.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
The
 upgrade wizard displays the database connection string for the domain 
configuration repository based on how the connection string of the 
previous version was created at installation:&lt;br /&gt;
  &lt;ul&gt;
&lt;li&gt; If the 
previous version used a JDBC URL at installation, the upgrade wizard 
displays the JDBC connection properties, including the database address 
and service name.&lt;/li&gt;
&lt;li&gt; If the previous version used a custom 
JDBC connection string at installation, the upgrade wizard displays the 
custom connection string.&lt;/li&gt;
&lt;li&gt; Optionally, you can specify 
additional JDBC parameters to include in the connection string. To 
provide additional JDBC parameters, select JDBC parameters and enter a valid JDBC parameter string.&lt;/li&gt;
&lt;/ul&gt;
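For illustration only, a JDBC URL of the kind the wizard can display for an Oracle domain configuration repository typically has the following form (the host, port, and service name are placeholders, not values from this post):&lt;br /&gt;
&lt;pre&gt;
jdbc:oracle:thin:@//dbhost.example.com:1521/infadom
&lt;/pre&gt;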
12. Click Test Connection to verify that you can connect to the database, and then click OK to continue.&lt;br /&gt;
  13. Click Next.&lt;br /&gt;
  On
 the Port Configuration Upgrade window, the upgrade wizard displays the 
default port numbers assigned to the domain and node components.&lt;br /&gt;
  14. You can specify new port numbers or use the default port numbers.&lt;br /&gt;
  The following table describes the ports that you can specify:   &lt;br /&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; style=&quot;width: 479px;&quot;&gt;&lt;tbody&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;196&quot;&gt;           &lt;b&gt;Port&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;281&quot;&gt;           &lt;b&gt;Description&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;196&quot;&gt;           Service Manager port&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;281&quot;&gt;           Port
 number used by the Service Manager in the node. Client applications and
 the Informatica command line programs use this port to communicate with the services in the domain.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;196&quot;&gt;           Informatica Administrator port&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;281&quot;&gt;           Port number used by the Administrator tool.&lt;br /&gt;
            Available if you upgrade a gateway node.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;196&quot;&gt;           Informatica Administrator&lt;br /&gt;
            shutdown port&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;293&quot;&gt;           Port number used by the Administrator tool to listen for shut down commands.&lt;br /&gt;
            Available if you upgrade a gateway node.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
15. Click Next.&lt;br /&gt;
  On
 Windows, the upgrade wizard creates a service to start Informatica. By 
default, the service runs under the same user account as the account 
used for installation. You can run the Windows service under a different user account.&lt;br /&gt;
  16. Select whether to run the Windows service under a different user account.&lt;br /&gt;
  The following table describes the properties that you set:   &lt;br /&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot;&gt;&lt;tbody&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           &lt;b&gt;Property&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           &lt;b&gt;Description&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Run Informatica under a&lt;br /&gt;
            different user account&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Indicates whether to run the Windows service under a different user account.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           User name&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           User account with which to run the Informatica Windows service.&lt;br /&gt;
            Use the following format: DomainName\UserAccount&lt;br /&gt;
            This user account must have the Act as part of the operating system permission.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Password&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Password for the user account with which to run the Informatica Windows service.&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
17. Click Next.&lt;br /&gt;
  The Post-Upgrade Summary window indicates whether the upgrade completed successfully.&lt;br /&gt;
  18. Click Done.&lt;br /&gt;
  &lt;b&gt;&lt;span style=&quot;color: blue;&quot;&gt;Upgrade the Application Services: &lt;/span&gt;&lt;/b&gt;&lt;br /&gt;
  &lt;b&gt;1. &lt;/b&gt;&lt;b&gt;Configure Informatica Environment Variables&lt;/b&gt;&lt;br /&gt;
  You
 can configure the INFA_JAVA_OPTS, INFA_DOMAINS_FILE, and INFA_HOME 
environment variables to store memory, domain, and location settings.&lt;br /&gt;
    &lt;b&gt;(i) &lt;/b&gt;&lt;b&gt;INFA_JAVA_OPTS&lt;/b&gt;&lt;br /&gt;
  For example, to configure 1 GB of system memory for the Informatica daemon on UNIX in a C shell:&lt;br /&gt;
  setenv INFA_JAVA_OPTS &quot;-Xmx1024m&quot;&lt;br /&gt;
  &lt;b&gt;(ii) &lt;/b&gt;&lt;b&gt;INFA_DOMAINS_FILE&lt;/b&gt;&lt;br /&gt;
  Set
 the value of the INFA_DOMAINS_FILE variable to the path and file name 
of the domains.infa file. If you configure the INFA_DOMAINS_FILE 
variable, you can run infacmd and pmcmd from a directory other than 
/server/bin.&lt;br /&gt;
  Configure the INFA_DOMAINS_FILE variable on the 
machine where you install the Informatica services. On Windows, 
configure INFA_DOMAINS_FILE as a system variable. &lt;br /&gt;
  &lt;b&gt;(iii) &lt;/b&gt;&lt;b&gt;INFA_HOME&lt;/b&gt;&lt;br /&gt;
  Set INFA_HOME if you use a softlink in UNIX for any of the Informatica directories. So that any Informatica application or service can locate the other Informatica components it needs to run, set INFA_HOME to the location of the Informatica installation directory.&lt;br /&gt;
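  Putting the three variables together, a minimal C shell sketch might look like the following (the paths are placeholders for an actual installation, not values from this post):&lt;br /&gt;
&lt;pre&gt;
# Memory settings passed to the Informatica daemon
setenv INFA_JAVA_OPTS &quot;-Xmx1024m&quot;

# Location of the domains.infa file, so infacmd and pmcmd can run outside /server/bin (placeholder path)
setenv INFA_DOMAINS_FILE /opt/Informatica/9.0.1/domains.infa

# Root of the Informatica installation (placeholder path)
setenv INFA_HOME /opt/Informatica/9.0.1
&lt;/pre&gt;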
  &lt;b&gt;2. &lt;/b&gt;&lt;b&gt;Configure Locale Environment Variables&lt;/b&gt;&lt;br /&gt;
  Use
 the following command to verify that the value for the locale 
environment variable is compatible with the Language settings for the 
machine and the type of code page you want to use for the repository:&lt;br /&gt;
  &lt;b&gt;E.g.:&lt;/b&gt; locale -a&lt;br /&gt;
  &lt;b&gt;Locale for Oracle Database Clients&lt;/b&gt;&lt;br /&gt;
  If the value is american_america.UTF8, set the variable in a C shell with the following command:&lt;br /&gt;
  &lt;b&gt;setenv NLS_LANG american_america.UTF8&lt;/b&gt;&lt;br /&gt;
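  A short C shell sketch of the locale check and the Oracle client setting described above (the NLS_LANG value is the example used in this post):&lt;br /&gt;
&lt;pre&gt;
# List the locales available on the machine
locale -a

# Oracle client locale for a UTF-8 repository code page
setenv NLS_LANG american_america.UTF8
&lt;/pre&gt;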
  &lt;strong&gt;&lt;span style=&quot;color: blue;&quot;&gt;Service Upgrade: &lt;/span&gt;&lt;/strong&gt;&lt;br /&gt;
  Use the service upgrade wizard to upgrade services.&lt;br /&gt;
  1. In the Informatica Administrator header area click &lt;b&gt;Manage &lt;/b&gt;&amp;gt; &lt;b&gt;Upgrade&lt;/b&gt;.&lt;br /&gt;
  2. Select the objects to upgrade.&lt;br /&gt;
  3. Click &lt;b&gt;Next&lt;/b&gt;.&lt;br /&gt;
  4. If dependency errors exist, the &lt;b&gt;Dependency Errors &lt;/b&gt;dialog box appears. Review the dependency errors and click &lt;b&gt;OK&lt;/b&gt;. Then, resolve the dependency errors and click &lt;b&gt;Next&lt;/b&gt;.&lt;br /&gt;
  5. Enter the repository login information. Optionally, choose to use the same login information for all Repositories.&lt;br /&gt;
  6. Click &lt;b&gt;Next&lt;/b&gt;.&lt;br /&gt;
  The service upgrade wizard upgrades each service and displays the status and processing details.&lt;br /&gt;
  7. When the upgrade completes, the &lt;b&gt;Summary &lt;/b&gt;section displays the list of services and their upgrade status.&lt;br /&gt;
  Click each service to view the upgrade details in the &lt;b&gt;Service Details &lt;/b&gt;section.&lt;br /&gt;
  8. Optionally, click &lt;b&gt;Save Report &lt;/b&gt;to save the upgrade details to a file.&lt;br /&gt;
  If you choose not to save the report, you can click &lt;b&gt;Save Previous Report &lt;/b&gt;the next time you launch the Service upgrade wizard.&lt;br /&gt;
  9. Click &lt;b&gt;Close&lt;/b&gt;.&lt;br /&gt;
  10. Restart upgraded services.&lt;br /&gt;
  After you upgrade the PowerCenter Repository Service, you must restart the service and its dependent services.&lt;br /&gt;
  &lt;strong&gt;&lt;span style=&quot;color: blue;&quot;&gt;Informatica Client Upgrade&lt;/span&gt;&lt;/strong&gt;&lt;br /&gt;
  1. Close all applications.&lt;br /&gt;
  2. Run install.bat from the root directory.&lt;br /&gt;
  The
 Upgrade Pre-Requisites window displays the system requirements. Verify 
that all installation requirements are met before you continue the installation.&lt;br /&gt;
  3. Click Next.&lt;br /&gt;
  On the Select Component window, select the Informatica client you want to upgrade.&lt;br /&gt;
  You can upgrade the following Informatica client applications:&lt;br /&gt;
  &lt;ul&gt;
&lt;li&gt; Informatica Developer&lt;/li&gt;
&lt;li&gt; PowerCenter Client&lt;/li&gt;
&lt;/ul&gt;
If
 both Informatica Developer and PowerCenter Client are installed on the 
machine, you can upgrade the tools in the same process.&lt;br /&gt;
  4. On the Upgrade Directory window, enter the following directories:   &lt;br /&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot;&gt;&lt;tbody&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           &lt;b&gt;Directory&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           &lt;b&gt;Description&lt;/b&gt;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Directory of the Informatica&lt;br /&gt;
            client to upgrade&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Directory that contains the previous version of the Informatica client tool that you want to upgrade&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;tr&gt;         &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Directory for Informatica 9.0.1&lt;br /&gt;
            client tools&lt;br /&gt;
         &lt;/td&gt;          &lt;td valign=&quot;top&quot; width=&quot;319&quot;&gt;           Directory in which to install the Informatica 9.0.1 client tools&lt;br /&gt;
            Enter
 the absolute path for the installation directory. The installation 
directory must be on the current machine. The directory names in the 
path must not contain spaces or the following special characters: @|* $ #
 ! % ( ) { } [ ] , ; &#39;&lt;br /&gt;
         &lt;/td&gt;       &lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
5. Click Next.&lt;br /&gt;
  6. On the Pre-Installation Summary window, review the installation information, and click Install.&lt;br /&gt;
  The installer copies the Informatica client files to the installation directory.&lt;br /&gt;
  The Post-installation Summary window indicates whether the upgrade completed successfully.&lt;br /&gt;
  7. Click Done.&lt;br /&gt;
  &lt;strong&gt;&lt;span style=&quot;color: blue;&quot;&gt;Post Upgrade Tasks: &lt;/span&gt;&lt;/strong&gt;&lt;br /&gt;
  &lt;strong&gt;Informatica Domain&lt;/strong&gt;&lt;br /&gt;
  &lt;ul&gt;
&lt;li&gt; Configure LDAP Connectivity.&lt;/li&gt;
&lt;li&gt; Update the Log Events Directory.&lt;/li&gt;
&lt;li&gt; Update ODBC Data Sources.&lt;/li&gt;
&lt;li&gt; Update Statistics for the Domain Configuration Repository.&lt;/li&gt;
&lt;li&gt; View Log Events from the Previous Informatica Version.&lt;/li&gt;
&lt;/ul&gt;
&lt;strong&gt;Metadata Manager Service&lt;/strong&gt;&lt;br /&gt;
  &lt;ul&gt;
&lt;li&gt; Reload Metadata Manager Resources&lt;/li&gt;
&lt;li&gt; Update the Metadata Manager Properties File&lt;/li&gt;
&lt;li&gt; Reference Table Manager&lt;/li&gt;
&lt;/ul&gt;
For a detailed study of the version upgrade, refer to the upgrade documentation from Informatica Corporation.&lt;br /&gt;
&lt;div style=&quot;background-color: white; border: medium none; color: black; overflow: hidden; text-align: left; text-decoration: none;&quot;&gt;
&lt;br /&gt;Read more: &lt;a href=&quot;http://informaticatutorials-naveen.blogspot.com/#ixzz2WDfms5w2&quot; style=&quot;color: #003399;&quot;&gt;http://informaticatutorials-naveen.blogspot.com/#ixzz2WDfms5w2&lt;/a&gt;
&lt;br /&gt;Under Creative Commons License: &lt;a href=&quot;http://creativecommons.org/licenses/by-nc/3.0&quot; style=&quot;color: #003399;&quot;&gt;Attribution Non-Commercial&lt;/a&gt;&lt;/div&gt;
&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/3525793375442171329/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2013/06/informatica-upgrade-process.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3525793375442171329'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3525793375442171329'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2013/06/informatica-upgrade-process.html' title='Informatica Upgrade Process:'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-1321472510439662822</id><published>2013-05-15T01:23:00.002+05:30</published><updated>2013-05-15T01:23:40.864+05:30</updated><title type='text'>Design Tip #155 Going Agile? Start with the Bus Matrix</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Many organizations are embracing agile development techniques for their DW/BI implementations. While we strongly concur with agile’s focus on business collaboration to deliver value via incremental initiatives, we’ve also witnessed agile’s “dark side.” Some teams get myopically focused on a narrowly-defined set of business requirements. They extract a limited amount of source data to develop a point solution in a vacuum. The resultant standalone solution can’t be leveraged by other groups and/or integrated with other analytics. The agile deliverable may have been built quickly, so it’s deemed a success. But when organizations lift their heads several years down the agile road, they often discover a non-architected hodgepodge of stovepipe data marts. The agile approach promises to reduce cost (and risk), but some organizations end up spending more on redundant, isolated efforts, coupled with the ongoing cost of fragmented decision-making based on inconsistent data.&lt;br /&gt;&lt;br /&gt;It’s no surprise that a common criticism of the agile approaches for DW/BI development is the lack of planning and architecture, coupled with ongoing governance challenges. We believe the enterprise data warehouse bus matrix (described in our article “The Matrix: Revisited”) is a powerful tool to address these shortcomings. The bus matrix provides a master plan for agile development, plus it identifies the reusable common descriptive dimensions that provide both data consistency and reduced time-to-market delivery in the long run.&lt;br /&gt;&lt;br /&gt;With the right mix of business and IT stakeholders in a room, along with a skilled facilitator, the bus matrix can be produced in relatively short order (measured in days, not weeks). Drafting the bus matrix depends on a solid understanding of the business’s needs. Collaboration is critical to identifying the business’s core processes. It’s a matter of getting the team members to visualize the key measurement events needed for analyses. Involving business representatives and subject matter experts will ensure the team isn’t paralyzed by this task. You’ll likely discover that multiple business areas or departments are interested in the same fundamental business processes. As the business is brainstorming the list of measurement events, IT representatives are bringing a dose of reality about the available operational source data and any known limitations.&lt;br /&gt;&lt;br /&gt;Once the matrix has been drafted, the team can then adopt agile development techniques to bring it to life. Business and IT management need to identify the single business process matrix row that’s both a high priority for the business, and highly feasible from a technical perspective. Focusing on just one matrix row minimizes the risk of signing up for an overly ambitious implementation. Most implementation risk comes from biting off too much ETL system design and development; focusing on a single business process, typically captured by a single operational source system, reduces this risk. Incremental development can produce the descriptive dimensions associated with the selected matrix row until sufficient functionality is available and then the dimensional model is released to the business community, as we describe in Design Tip #135: Conformed Dimensions as the Foundation for Agile Data Warehousing.&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/1321472510439662822/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2013/05/design-tip-155-going-agile-start-with.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/1321472510439662822'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/1321472510439662822'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2013/05/design-tip-155-going-agile-start-with.html' title='Design Tip #155 Going Agile? Start with the Bus Matrix'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-3682097360436512317</id><published>2013-05-15T01:22:00.004+05:30</published><updated>2013-05-15T01:22:42.718+05:30</updated><title type='text'>White Paper: Evolving Role of the Enterprise Data Warehouse in the Era of Big Data Analytics</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
The enterprise data warehouse (EDW) community has entered a new realm of meeting new and growing business requirements in the era of big data. Common challenges include:&lt;br /&gt;&lt;br /&gt;
&lt;ol style=&quot;text-align: left;&quot;&gt;
&lt;li&gt;extreme integration&lt;/li&gt;
&lt;li&gt;semi-structured and unstructured data sources&lt;/li&gt;
&lt;li&gt;petabytes of behavioral and image data accessed through MapReduce/Hadoop&lt;/li&gt;
&lt;li&gt;massively parallel relational databases&lt;/li&gt;
&lt;li&gt;structural considerations for the EDW to support predictive and other advanced analytics&lt;/li&gt;
&lt;/ol&gt;
&lt;div style=&quot;text-align: left;&quot;&gt;
&lt;br /&gt;These pressing needs raise more than a few urgent questions, such as:&lt;br /&gt;&lt;/div&gt;
&lt;ol style=&quot;text-align: left;&quot;&gt;
&lt;li&gt;How do you handle the explosion and diversity of data sources from conventional and non-conventional sources?&lt;/li&gt;
&lt;li&gt;What new and existing technologies are needed to deepen the understanding of business through big data analytics?&lt;/li&gt;
&lt;li&gt;What technological requirements are needed to deploy big data projects?&lt;/li&gt;
&lt;li&gt;What potential organizational and cultural impacts should be considered?&lt;/li&gt;
&lt;/ol&gt;
&lt;div style=&quot;text-align: left;&quot;&gt;
&lt;br /&gt;This white paper provides detailed guidance for designing and administering the deployment processes necessary to meet these requirements. Ralph Kimball fills a gap where the industry lacks specific guidance on how the EDW needs to respond to the big data analytics challenge and on what design elements are needed to support these new requirements.&lt;/div&gt;
&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/3682097360436512317/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2013/05/white-paper-evolving-role-of-enterprise.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3682097360436512317'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3682097360436512317'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2013/05/white-paper-evolving-role-of-enterprise.html' title='White Paper: Evolving Role of the Enterprise Data Warehouse in the Era of Big Data Analytics'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-7122920347422356836</id><published>2012-10-07T12:46:00.002+05:30</published><updated>2012-10-07T12:47:47.975+05:30</updated><title type='text'>Writing a Proper Data Warehousing Request for Proposal!</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; text-transform: uppercase;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;1. General Information for Vendors
&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l2 level1 lfo5; tab-stops: list .75in; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Lists the Project Vision&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l2 level1 lfo5; tab-stops: list .75in; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Project timelines &lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l2 level1 lfo5; tab-stops: list .75in; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Nature of contract&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l2 level1 lfo5; tab-stops: list .75in; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Scope&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 9pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Give an overview of the project and the format for the RFP process.&lt;/i&gt;&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;2.&lt;/span&gt;&lt;/b&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; text-transform: uppercase;&quot;&gt;System Background&lt;/span&gt;&lt;/b&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 9pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 9pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Description of the Technical Environment&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l9 level1 lfo7; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;a)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Description of the Client and Server specifications at the Ministry. &lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l9 level1 lfo7; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;b)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Breakdown of&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt; user types for the data warehouse:&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;table border=&quot;1&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;MsoTableGrid&quot; style=&quot;border-bottom: medium none; border-collapse: collapse; border-left: medium none; border-right: medium none; border-top: medium none; margin: auto auto auto 59.4pt; mso-border-alt: solid windowtext .5pt; mso-border-insideh: .5pt solid windowtext; mso-border-insidev: .5pt solid windowtext; mso-padding-alt: 0in 5.4pt 0in 5.4pt; mso-yfti-tbllook: 480; width: 552px;&quot;&gt;
&lt;tbody&gt;
&lt;tr style=&quot;mso-yfti-irow: 0;&quot;&gt;
&lt;td style=&quot;background: #e0e0e0; border-bottom: windowtext 1pt solid; border-left: windowtext 1pt solid; border-right: windowtext 1pt solid; border-top: windowtext 1pt solid; mso-border-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 117pt;&quot; width=&quot;156&quot;&gt;&lt;div align=&quot;center&quot; class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in 0pt; text-align: center;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;User Types&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div align=&quot;center&quot; class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 6pt; text-align: center;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;(Examples Only)&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;td style=&quot;background: #e0e0e0; border-bottom: windowtext 1pt solid; border-left: #ece9d8; border-right: windowtext 1pt solid; border-top: windowtext 1pt solid; mso-border-alt: solid windowtext .5pt; mso-border-left-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 189pt;&quot; width=&quot;252&quot;&gt;&lt;div align=&quot;center&quot; class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in; text-align: center;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Description&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;td style=&quot;background: #e0e0e0; border-bottom: windowtext 1pt solid; border-left: #ece9d8; border-right: windowtext 1pt solid; border-top: windowtext 1pt solid; mso-border-alt: solid windowtext .5pt; mso-border-left-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 1.5in;&quot; width=&quot;144&quot;&gt;&lt;div align=&quot;center&quot; class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in; text-align: center;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Number of Users&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;tr style=&quot;mso-yfti-irow: 1;&quot;&gt;
&lt;td style=&quot;background-color: transparent; border-bottom: windowtext 1pt solid; border-left: windowtext 1pt solid; border-right: windowtext 1pt solid; border-top: #ece9d8; mso-border-alt: solid windowtext .5pt; mso-border-top-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 117pt;&quot; width=&quot;156&quot;&gt;&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Novice User&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;td style=&quot;background-color: transparent; border-bottom: windowtext 1pt solid; border-left: #ece9d8; border-right: windowtext 1pt solid; border-top: #ece9d8; mso-border-alt: solid windowtext .5pt; mso-border-left-alt: solid windowtext .5pt; mso-border-top-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 189pt;&quot; valign=&quot;top&quot; width=&quot;252&quot;&gt;&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Uses “canned” reports and queries.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;td style=&quot;background-color: transparent; border-bottom: windowtext 1pt solid; border-left: #ece9d8; border-right: windowtext 1pt solid; border-top: #ece9d8; mso-border-alt: solid windowtext .5pt; mso-border-left-alt: solid windowtext .5pt; mso-border-top-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 1.5in;&quot; width=&quot;144&quot;&gt;&lt;div align=&quot;center&quot; class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in; text-align: center;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;x&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;tr style=&quot;mso-yfti-irow: 2; mso-yfti-lastrow: yes;&quot;&gt;
&lt;td style=&quot;background-color: transparent; border-bottom: windowtext 1pt solid; border-left: windowtext 1pt solid; border-right: windowtext 1pt solid; border-top: #ece9d8; mso-border-alt: solid windowtext .5pt; mso-border-top-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 117pt;&quot; width=&quot;156&quot;&gt;&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Power User&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;td style=&quot;background-color: transparent; border-bottom: windowtext 1pt solid; border-left: #ece9d8; border-right: windowtext 1pt solid; border-top: #ece9d8; mso-border-alt: solid windowtext .5pt; mso-border-left-alt: solid windowtext .5pt; mso-border-top-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 189pt;&quot; valign=&quot;top&quot; width=&quot;252&quot;&gt;&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Frequent user; edits reports and queries directly, and builds cubes for multi-dimensional analysis.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;td style=&quot;background-color: transparent; border-bottom: windowtext 1pt solid; border-left: #ece9d8; border-right: windowtext 1pt solid; border-top: #ece9d8; mso-border-alt: solid windowtext .5pt; mso-border-left-alt: solid windowtext .5pt; mso-border-top-alt: solid windowtext .5pt; padding-bottom: 0in; padding-left: 5.4pt; padding-right: 5.4pt; padding-top: 0in; width: 1.5in;&quot; width=&quot;144&quot;&gt;&lt;div align=&quot;center&quot; class=&quot;MsoNormal&quot; style=&quot;margin: 6pt 0in; text-align: center;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;x&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l9 level1 lfo7; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;c)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Software tools for the data warehouse:&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in; mso-list: l12 level4 lfo2; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Symbol; font-size: 11pt; mso-bidi-font-family: Symbol; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Symbol;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;·&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;ETL Tools&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in; mso-list: l12 level4 lfo2; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Symbol; font-size: 11pt; mso-bidi-font-family: Symbol; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Symbol;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;·&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Report and Query Tools&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in; mso-list: l12 level4 lfo2; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Symbol; font-size: 11pt; mso-bidi-font-family: Symbol; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Symbol;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;·&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Data Analysis&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in; mso-list: l12 level4 lfo2; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Symbol; font-size: 11pt; mso-bidi-font-family: Symbol; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Symbol;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;·&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Data Visualization Tools&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.75in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -63pt 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Give an overview of the expected installation. Also specify points of particular&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -63pt 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;interest.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;3.&lt;/span&gt;&lt;/b&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; text-transform: uppercase;&quot;&gt;System Requirements&lt;/span&gt;&lt;/b&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;ul style=&quot;margin-top: 0in;&quot; type=&quot;square&quot;&gt;
&lt;li class=&quot;MsoNormal&quot; style=&quot;color: black; margin: 0in 0in 0pt; mso-list: l12 level1 lfo2; tab-stops: list .5in;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;List of General Requirements&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;→ Give an overview of the data warehouse environment.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: An overview of the rationale for the data warehouse implementation.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;ul style=&quot;margin-top: 0in;&quot; type=&quot;square&quot;&gt;
&lt;li class=&quot;MsoNormal&quot; style=&quot;color: black; margin: 0in 0in 0pt; mso-list: l8 level1 lfo3; tab-stops: list .5in;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Mandatory Requirements&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l6 level2 lfo6; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;a)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;State whether the software is client/server-based or web-based. Describe what functions are available via the Internet browser and what plug-ins (if any) are necessary for the client machines.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l6 level2 lfo6; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;b)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;State
 that the vendor must communicate with the Ministry’s database and 
applications. Ask the vendor how they propose to do this.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l6 level2 lfo6; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;c)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;The vendor must be committed to its products remaining compatible with future versions of &lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;Oracle.&lt;/i&gt; Ask the vendor to offer documents as evidence of commitment to this environment.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l6 level2 lfo6; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt; mso-fareast-font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;d)&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;The vendor’s system must enable the overall optimization of the Ministry’s environment:&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 2in; mso-list: l18 level5 lfo1; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Times New Roman&#39;; font-size: 11pt; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;i)&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Ask how the vendor’s system minimizes user response time while maximizing the use of system resources.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 2in; mso-list: l18 level5 lfo1; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Times New Roman&#39;; font-size: 11pt; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;ii)&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Ask the vendor to explain the system’s architecture for data selection, processing, and formatting.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 2in; mso-list: l18 level5 lfo1; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Times New Roman&#39;; font-size: 11pt; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;iii)&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Ask for evidence of how network traffic is minimized and how the need to process large data sets on the client is avoided (see the illustrative sketch below).&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
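&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;i&gt;&lt;span style=&quot;font-family: Arial; font-size: 11pt;&quot;&gt;For illustration only: a minimal sketch of why server-side aggregation reduces network traffic and client-side processing. The table, column, and connection names are hypothetical, and SQLite merely stands in for whatever warehouse database the vendor supports.&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;pre style=&quot;font-family: &#39;Courier New&#39;; font-size: 9pt;&quot;&gt;
# Hypothetical sketch: contrast client-side and server-side aggregation.
import sqlite3  # stand-in for the warehouse RDBMS

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE sales (region TEXT, amount REAL)')
conn.executemany('INSERT INTO sales VALUES (?, ?)',
                 [('East', 100.0), ('East', 250.0), ('West', 75.0)])

# Anti-pattern: ship every detail row to the client, then aggregate there.
rows = conn.execute('SELECT region, amount FROM sales').fetchall()
client_side = {}
for region, amount in rows:
    client_side[region] = client_side.get(region, 0.0) + amount

# Preferred: aggregate in the database so only summary rows cross the network.
server_side = dict(conn.execute(
    'SELECT region, SUM(amount) FROM sales GROUP BY region'))

print(client_side, server_side)  # both: {'East': 350.0, 'West': 75.0}
&lt;/pre&gt;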
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Based on user interviews and technical review, these are the&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;line-height: 150%; margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; line-height: 150%; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;must-have features for the data warehouse installation.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;ul style=&quot;margin-top: 0in;&quot; type=&quot;square&quot;&gt;
&lt;li class=&quot;MsoNormal&quot; style=&quot;color: black; margin: 0in 0in 0pt; mso-list: l8 level1 lfo3; tab-stops: list .5in;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Strongly Desired Features&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Examples:&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;→ The vendor’s system must allow users to schedule reports. &lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;→ Does your system permit users to schedule reports at a certain time&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in; text-indent: 0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;and date? Daily? Weekly? Based on an event?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Describe how scheduling is accomplished and the options for scheduling reports.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
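&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in;&quot;&gt;
&lt;i&gt;&lt;span style=&quot;font-family: Arial; font-size: 11pt;&quot;&gt;For illustration only: a minimal sketch of the kinds of scheduling options this question probes – daily, weekly, or event-driven runs. All names and fields below are hypothetical examples, not any vendor’s actual API.&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;pre style=&quot;font-family: &#39;Courier New&#39;; font-size: 9pt;&quot;&gt;
# Hypothetical sketch of report-scheduling options; every name is illustrative.
from dataclasses import dataclass
from typing import Optional

@dataclass
class ReportSchedule:
    report_name: str
    frequency: str                        # 'daily', 'weekly', or 'on_event'
    run_time: Optional[str] = None        # e.g. '06:00' for time-based runs
    weekday: Optional[str] = None         # e.g. 'Monday' for weekly runs
    trigger_event: Optional[str] = None   # e.g. 'etl_load_complete'

schedules = [
    ReportSchedule('Budget Summary', 'weekly', run_time='06:00', weekday='Monday'),
    ReportSchedule('Load Audit', 'on_event', trigger_event='etl_load_complete'),
]
for s in schedules:
    print(s)
&lt;/pre&gt;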
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Based on the requirements analysis, the items listed here are&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;line-height: 150%; margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; line-height: 150%; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;additional features that will be good to have but not mandatory.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;ul style=&quot;margin-top: 0in;&quot; type=&quot;square&quot;&gt;
&lt;li class=&quot;MsoNormal&quot; style=&quot;color: black; margin: 6pt 0in 0pt; mso-list: l8 level1 lfo3; tab-stops: list .5in;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Desired Features&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;
&lt;/li&gt;
&lt;/ul&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Examples:&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;→ Does your system possess a redundant query control?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;This feature returns a saved version of a report whenever an identical report has been executed earlier – that is, the report definition, output format, and underlying raw data have all remained constant in the interim (an illustrative sketch follows below). Describe how this works in your system.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt; &lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;What are the limits on how far back reports are archived?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
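&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.5in;&quot;&gt;
&lt;i&gt;&lt;span style=&quot;font-family: Arial; font-size: 11pt;&quot;&gt;For illustration only: a minimal sketch of the redundant-query-control idea – reuse a saved report when the query, output format, and underlying data version are all unchanged. Every name here is a hypothetical example, not any vendor’s design.&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;pre style=&quot;font-family: &#39;Courier New&#39;; font-size: 9pt;&quot;&gt;
# Hypothetical sketch of a report cache keyed on query + format + data version.
import hashlib

_report_cache = {}

def run_report(query, output_format, data_version, execute):
    key = hashlib.sha256(
        '|'.join([query, output_format, data_version]).encode()
    ).hexdigest()
    if key in _report_cache:        # identical report produced earlier: reuse it
        return _report_cache[key]
    result = execute(query)         # otherwise run the query and archive the output
    _report_cache[key] = result
    return result

# The second call reuses the archived output without re-executing the query.
first = run_report('SELECT 1', 'pdf', 'load-2013-06-14', lambda q: 'rendered report')
second = run_report('SELECT 1', 'pdf', 'load-2013-06-14', lambda q: 'rendered report')
assert first is second
&lt;/pre&gt;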
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Following the rationale of “Strongly Desired Features” above, this&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;section lists a lower-priority set of desired requirements.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l1 level1 lfo8; tab-stops: list .75in; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Product Profile&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Product name&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Product description&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Current release level&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Date current release level was available&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Projected date for the next release candidate&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Current product’s install base&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Industry verticals of installation&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Number of installations&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l7 level3 lfo11; tab-stops: list 99.0pt; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Breakdown of installations/users&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Gauge the vendor’s product specifications and maturity.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l0 level1 lfo9; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Consulting&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Describe the level of involvement the consultants will have at each stage of the project.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;What is the experience of the consultants?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Location of Consultants.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please forward resumes of consultants.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;u&gt;Reasoning&lt;/u&gt;: Evaluate the experience and competency of the vendor’s team.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;Also enquire about the consultants’ geographic location; this will help evaluate&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;their likely response times.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l15 level1 lfo12; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Business Partners&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.75in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Ask the vendor to state which products are fully integrated with your solution.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Describe the interconnectivity and interface requirements.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;:
 Enquire what other products can integrate well with the solution, e.g. 
Crystal Reports, Cognos. Also, the vendor will state the ease with which 
integration can occur.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l4 level1 lfo13; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Full features of the business intelligence solution and products&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Ask what makes the product stand out.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.75in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;: This will give the vendor a chance to distinguish their products.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.75in 0pt 0.75in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;Further business intelligence capabilities, such as OLAP and data mining, can be evaluated.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l14 level1 lfo14; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Application areas&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Describe the areas the solution supports, e.g. budgeting, financial planning, etc.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.25in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;: Evaluate which areas the data warehousing solution can serve.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.25in 0pt 0.75in;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;The “Mandatory Requirements” section above informs the vendor of the areas that are necessary for the Ministry. This section gives further information on the vendor’s solution and possible areas which the Ministry could use in the future.&lt;/span&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.25in;&quot;&gt;
&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l11 level1 lfo15; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Web-based and standalone clients&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.25in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Ask whether the solution supports both web-based and standalone clients. Ask which functionality is not supported in both versions (i.e. what works in the standalone client but not the web-based client, and vice versa).&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.25in 0pt 1in;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;:
The architecture is of fundamental importance. This section enquires whether the vendor’s solution will match the Ministry’s requirements.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-list: l5 level1 lfo16; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Pricing and Licensing Model&lt;/span&gt;&lt;/span&gt;&lt;/b&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-list: l16 level2 lfo10; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How much does the product cost, including initial training, support and consulting?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;: Gauge the cost of the solution and other indirect costs.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt;&quot;&gt;
&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial; mso-bidi-font-size: 9.0pt;&quot;&gt;&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-layout-grid-align: none; mso-list: l10 level1 lfo17; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;System Installation and Tool Administration&lt;/span&gt;&lt;/b&gt; &lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Examples:&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.5in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please describe the platform and system requirements of your product.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.5in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;For
 web-based clients, are there specific requirements on the type and 
version of browser that may be used (e. g. Netscape vs. Internet 
Explorer)?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please
 describe the overall architecture of your product (e. g. Client/ 
Server, two-tier/ three-tier architecture, etc.), including how your 
product functions in a geographically distributed environment.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please describe the steps involved in installing your product.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please list the administrative functions that are included in your product (e. g. defining roles, backup/ recovery, etc.).&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.5in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How
 do tool administrators detect when your product requires additional hardware or requires human intervention (e.g., to restart a process)?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How often are major/ minor software upgrades available? How would we be notified of a new software release?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please
 describe how standalone (i.e., non-web-based) clients are distributed to end-user desktops. How are software upgrades distributed to end-users?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l19 level2 lfo18; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 8pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Are there ways to make the upgrade process transparent to the end-users?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1.25in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;:
 The above questions and statements will give further information on how the vendor’s solution is installed and administered.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-layout-grid-align: none; mso-list: l13 level1 lfo19; text-indent: 0in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;Security&lt;/span&gt;&lt;/b&gt; &lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -9pt 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How are end-users and administrators authenticated to your product?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How do end-users and administrators gain access to reports, data sets, etc. (e. g. via roles)?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please fully describe how reports and data are protected from unauthorized users. &lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Please describe how row-level access to the data is attained.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;What level of encryption is used on passwords, data, reports, etc.?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -0.25in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How does the existence of a firewall change the security architecture? &lt;span style=&quot;mso-spacerun: yes;&quot;&gt;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How does the web-based client differ in terms of security from the standalone client?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.75in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;: The vendor’s answers to the security questions must also satisfy the security criteria set by regulators or industry standards.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 1in; mso-layout-grid-align: none; mso-list: l3 level1 lfo20; text-indent: -0.25in;&quot;&gt;
&lt;span style=&quot;color: black; font-family: Wingdings; mso-bidi-font-family: Wingdings; mso-fareast-font-family: Wingdings;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;&lt;span style=&quot;font-size: x-small;&quot;&gt;§&lt;/span&gt;&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;b style=&quot;mso-bidi-font-weight: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;Performance and Scalability&lt;/span&gt;&lt;/b&gt; &lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;Does the product support a substantial increase in data size and in the frequency and complexity of end-user queries?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 99pt; mso-layout-grid-align: none; mso-list: l17 level2 lfo4; text-indent: -27pt;&quot;&gt;
&lt;span style=&quot;color: black; font-family: &#39;Courier New&#39;; font-size: 11pt; mso-fareast-font-family: &#39;Courier New&#39;;&quot;&gt;&lt;span style=&quot;mso-list: Ignore;&quot;&gt;o&lt;span style=&quot;font: 7pt &#39;Times New Roman&#39;;&quot;&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&lt;/span&gt;&lt;/span&gt;&lt;/span&gt; &lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;span style=&quot;font-family: Arial;&quot;&gt;How is performance measured within your product?&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.25in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in -45pt 0pt 1in; mso-layout-grid-align: none;&quot;&gt;
&lt;span style=&quot;font-family: Arial;&quot;&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;u&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;Reasoning&lt;/span&gt;&lt;/u&gt;&lt;/i&gt;&lt;i style=&quot;mso-bidi-font-style: normal;&quot;&gt;&lt;span style=&quot;color: black; font-size: 11pt; mso-bidi-font-family: Arial;&quot;&gt;: Evaluate how the product can scale to meet future demands.&lt;/span&gt;&lt;/i&gt;&lt;/span&gt;&lt;/div&gt;
&lt;div class=&quot;MsoNormal&quot; style=&quot;margin: 0in 0in 0pt 0.5in;&quot;&gt;
&lt;br /&gt;&lt;/div&gt;
&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/7122920347422356836/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/writing-proper-data-warehousing-request.html#comment-form' title='1 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/7122920347422356836'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/7122920347422356836'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/writing-proper-data-warehousing-request.html' title='Writing a Proper Data Warehousing Request for Proposal!'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-4363410073422322770</id><published>2012-10-07T12:45:00.000+05:30</published><updated>2012-10-07T12:45:38.634+05:30</updated><title type='text'>Replicating Transactions Between Microsoft SQL Server and Oracle Database Using Oracle GoldenGate</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Most Oracle technology professionals who are interested in data 
replication are familiar with Oracle Streams. Until 2009, Streams was 
the recommended and most popular Oracle technology for data 
distribution.&lt;br /&gt;
 In July 2009, Oracle acquired GoldenGate, a 
provider of database replication software. The company is now 
encouraging its customers to use Oracle GoldenGate (which is part of the Oracle Fusion Middleware
 family) for their data replication needs in new applications. Oracle&#39;s statement of direction regarding Oracle Streams says that the product “will continue to be supported, but will not be actively enhanced.”&lt;br /&gt;
 In
 this article we will build a simple transaction replication example 
using Oracle GoldenGate, in order to get acquainted with this new 
technology.&lt;br /&gt;
 &lt;h3&gt;
&lt;strong&gt;Oracle GoldenGate Architecture&lt;/strong&gt;&lt;/h3&gt;
GoldenGate
 v11 enables transaction level replication among heterogeneous 
platforms. It supports Oracle Database, IBM DB2, Microsoft SQL Server, 
MySQL, Teradata, and many other platforms. (It also supports access 
through a generic ODBC driver.)&lt;br /&gt;
 The most important components to be familiar with are the Extract and Replicat processes. The Extract process runs on the source system and captures the data changes. The Replicat process runs on the target machine and is responsible for applying the changes to the target database.&lt;br /&gt;
 &lt;div align=&quot;center&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f1&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460265.jpg&quot; /&gt;&lt;/div&gt;
There are two common configurations for the Extract process. The so-called “initial load” is used for populating the target database with an exact copy of the source data (i.e., Extract fetches all data from the source database and typically runs only once). Then the “change synchronization” can take place. In the “change synchronization” configuration the Extract constantly monitors the source database and captures all changes on the fly.&lt;br /&gt;
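 To give a feel for the difference, here is a minimal sketch of what a “change synchronization” Extract parameter file could look like (the process name and trail file path are illustrative assumptions; the host and Manager port follow the ones used later in this article):&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;EXTRACT chgext&lt;br /&gt; SOURCEDB HR&lt;br /&gt; RMTHOST ORADB, MGRPORT 7809&lt;br /&gt; RMTTRAIL /u01/app/oracle/gg/dirdat/rt&lt;br /&gt; TABLE hrschema.emp;&lt;/span&gt;&lt;/div&gt;
 Unlike the one-time initial load, such an Extract is registered with ADD EXTRACT in GGSCI, runs continuously, and writes the captured changes to a trail file instead of extracting the whole table.&lt;br /&gt;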
 In this demonstration we will set up Microsoft SQL Server 2008 as the source database, configure and perform an initial load, and then start an Extract process in change synchronization mode. To show that this replication is truly heterogeneous, we will run SQL Server on Windows XP and Oracle Database 11&lt;em&gt;g&lt;/em&gt; Release 2 on Oracle Linux 5. As a prerequisite, I will assume that you already have a clean installation of SQL Server 2008 on the Windows box and of Oracle Database on the Linux machine.&lt;br /&gt;
 We will start building the demonstration scenario by installing GoldenGate. Let&#39;s start with the Windows box.&lt;br /&gt;
 &lt;h3&gt;
GoldenGate for SQL Server Installation on Windows XP&lt;/h3&gt;
First you need a copy of Oracle GoldenGate v11 for SQL Server. You can download it from http://edelivery.oracle.com
 (Oracle Fusion Middleware → Microsoft Windows x32 → Oracle GoldenGate 
for Non Oracle Database v11). The serial number of the media pack that 
you need is V22241-01.&lt;br /&gt;
&lt;br /&gt;
Extract the downloaded archive to the location where you want the Oracle GoldenGate installation (in this example, C:\GG). Then open a command prompt, go to that directory, and launch GGSCI (the GoldenGate command interface):&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;strong&gt;ggsci&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Oracle GoldenGate Command Interpreter for ODBC&lt;br /&gt; Version 11.1.1.0.0 Build 078&lt;br /&gt; Windows (optimized), Microsoft SQL Server on Jul 28 2010 18:55:52&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 1&amp;gt;&lt;/span&gt;&lt;/div&gt;
Next execute the command &lt;span style=&quot;font-family: Courier New;&quot;&gt;CREATE SUBDIRS&lt;/span&gt; to create the Oracle GoldenGate working directories.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 1&amp;gt; &lt;strong&gt;CREATE SUBDIRS&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Creating subdirectories under current directory C:\GG&lt;br /&gt; &lt;br /&gt; Parameter files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirprm: created&lt;br /&gt; Report files &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;C:\GG\dirrpt: created&lt;br /&gt; Checkpoint files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirchk: created&lt;br /&gt; Process status files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirpcs: created&lt;br /&gt; SQL script files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirsql: created&lt;br /&gt; Database definitions files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirdef: created&lt;br /&gt; Extract data files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirdat: created&lt;br /&gt; Temporary files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirtmp: created&lt;br /&gt; Veridata files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver: created&lt;br /&gt; Veridata Lock files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver\lock: created&lt;br /&gt; Veridata Out-Of-Sync files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver\oos: created&lt;br /&gt; Veridata Out-Of-Sync XML files C:\GG\dirver\oosxml: created&lt;br /&gt; Veridata Parameter files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver\params: created&lt;br /&gt; Veridata Report files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver\report: created&lt;br /&gt; Veridata Status files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver\status: created&lt;br /&gt; Veridata Trace files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirver\trace: created&lt;br /&gt; Stdout files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; C:\GG\dirout: created&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 2&amp;gt; &lt;strong&gt;EXIT&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; C:\GG&amp;gt;&lt;/span&gt;&lt;/div&gt;
According to the official documentation, GGSCI supports up to 300 concurrent Extract and Replicat processes per Oracle GoldenGate instance. There is, however, a single process that is responsible for controlling the other processes; it&#39;s called the Manager process. Although you can run this process manually, it is good practice to install it as a service - otherwise it will stop when the user who started it logs off.&lt;br /&gt;
 To add the Manager process as a Windows service execute the &lt;span style=&quot;font-family: Courier New;&quot;&gt;INSTALL ADDSERVICE&lt;/span&gt; command within the GoldenGate installation directory.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;strong&gt;INSTALL ADDSERVICE&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Service &#39;GGSMGR&#39; created.&lt;br /&gt; &lt;br /&gt; Install program terminated normally.&lt;br /&gt; &lt;br /&gt; C:\GG&amp;gt;&lt;/span&gt;&lt;/div&gt;
This pretty much completes the Windows installation. Let&#39;s move on to the Linux machine.&lt;br /&gt;
 &lt;h3&gt;
GoldenGate for Oracle Installation on Oracle Linux 5&lt;/h3&gt;
Installing Oracle GoldenGate on Linux is not much different from the installation you did on Windows XP. You will need to download the media pack of GoldenGate for Oracle on Linux (V22228-01). Create an installation directory and unzip the archive there. In this example, I use the /u01/app/oracle/gg directory, as our ORACLE_BASE is pointing to /u01/app/oracle. After this is done you have to set the PATH and LD_LIBRARY_PATH environment variables like this: &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;[oracle@oradb ~]$ &lt;strong&gt;export PATH=$PATH:$ORACLE_BASE/gg&lt;/strong&gt;&lt;br /&gt; [oracle@oradb ~]$ &lt;strong&gt;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_BASE/gg&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;
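Note that these exports only last for the current shell session; if you want them to survive a logout, you could append the same two lines to the oracle user&#39;s ~/.bash_profile (a common convention, not something GoldenGate requires; it assumes ORACLE_BASE and ORACLE_HOME are already defined there):&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;[oracle@oradb ~]$ &lt;strong&gt;echo &#39;export PATH=$PATH:$ORACLE_BASE/gg&#39; &amp;gt;&amp;gt; ~/.bash_profile&lt;/strong&gt;&lt;br /&gt; [oracle@oradb ~]$ &lt;strong&gt;echo &#39;export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_BASE/gg&#39; &amp;gt;&amp;gt; ~/.bash_profile&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;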
Let&#39;s start GGSCI and execute &lt;span style=&quot;font-family: Courier New;&quot;&gt;CREATE SUBDIRS&lt;/span&gt;.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;[oracle@oradb ggs]$ &lt;strong&gt;cd $ORACLE_BASE/gg&lt;/strong&gt;&lt;br /&gt; [oracle@oradb gg]$ &lt;strong&gt;./ggsci&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Oracle GoldenGate Command Interpreter for Oracle&lt;br /&gt; Version 11.1.1.0.0 Build 078&lt;br /&gt; Linux, x86, 32bit (optimized), Oracle 11 on Jul 28 2010 13:22:25&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 1&amp;gt; &lt;strong&gt;CREATE SUBDIRS&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Creating subdirectories under current directory /u01/app/oracle/gg&lt;br /&gt; &lt;br /&gt; Parameter files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirprm: created&lt;br /&gt; Report files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirrpt: created&lt;br /&gt; Checkpoint files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirchk: created&lt;br /&gt; Process status files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirpcs: created&lt;br /&gt; SQL script files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirsql: created&lt;br /&gt; Database definitions files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirdef: created&lt;br /&gt; Extract data files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirdat: created&lt;br /&gt; Temporary files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirtmp: created&lt;br /&gt; Veridata files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver: created&lt;br /&gt; Veridata Lock files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver/lock: created&lt;br /&gt; Veridata Out-Of-Sync files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver/oos: created&lt;br /&gt; Veridata Out-Of-Sync XML files /u01/app/oracle/gg/dirver/oosxml: created&lt;br /&gt; Veridata Parameter files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver/params: created&lt;br /&gt; Veridata Report files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver/report: created&lt;br /&gt; Veridata Status files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver/status: created&lt;br /&gt; Veridata Trace files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirver/trace: created&lt;br /&gt; Stdout 
files&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirout: created&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 2&amp;gt; &lt;strong&gt;EXIT&lt;/strong&gt;&lt;br /&gt; [oracle@oradb gg]$ &lt;/span&gt;&lt;/div&gt;
Installation on the Linux machine is now completed.&lt;br /&gt;
 &lt;h3&gt;
Preparing the Source Database&lt;/h3&gt;
The next step is to create a new database in SQL Server and populate it with some sample data. The name of the database will be EMP. You can create it by launching SQL Server Management Studio, right-clicking on &lt;strong&gt;Databases&lt;/strong&gt;, and selecting &lt;strong&gt;New Database&lt;/strong&gt;.&lt;br /&gt; &lt;br /&gt; &lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f3&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460267.png&quot; /&gt;&lt;br /&gt; &amp;nbsp;&lt;/div&gt;
&lt;div style=&quot;text-align: left;&quot;&gt;
Type EMP in the database name field and click &lt;strong&gt;OK&lt;/strong&gt;, leaving all other options by default.&lt;/div&gt;
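If you prefer T-SQL over the GUI, creating the database with all default options boils down to a single statement, which you can run from any query window:&lt;br /&gt;
&lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;CREATE DATABASE EMP&lt;br /&gt; GO&lt;/span&gt;&lt;/div&gt;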
Let&#39;s
 add a new database schema (HRSCHEMA), a table (EMP), and a few test records to the newly created database. This will be accomplished by running the following SQL:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;set ansi_nulls on&lt;br /&gt; go&lt;/span&gt;&lt;br /&gt;
 &lt;span style=&quot;font-family: Courier New;&quot;&gt;set quoted_identifier on&lt;br /&gt; go&lt;/span&gt;&lt;br /&gt;
 &lt;span style=&quot;font-family: Courier New;&quot;&gt;create schema hrschema&lt;br /&gt; go&lt;/span&gt;&lt;br /&gt;
 &lt;span style=&quot;font-family: Courier New;&quot;&gt;create table [hrschema].[emp] (&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; [id] [smallint] not null,&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; [first_name] varchar(50) not null,&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; [last_name] varchar(50) not null,&lt;br /&gt; constraint [emp_pk] primary key clustered (&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; [id] asc&lt;br /&gt; ) with (pad_index = off, statistics_norecompute=off, ignore_dup_key=off, allow_row_locks=on, allow_page_locks=on) on [primary]&lt;br /&gt; ) on [primary]&lt;/span&gt;&lt;br /&gt;
 &lt;span style=&quot;font-family: Courier New;&quot;&gt;go&lt;/span&gt;&lt;br /&gt;
 &lt;br /&gt;
 &lt;span style=&quot;font-family: Courier New;&quot;&gt;-- TEST DATA&lt;/span&gt;&lt;br /&gt;
 &lt;span style=&quot;font-family: Courier New;&quot;&gt;INSERT INTO [hrschema].[emp] ([id], [first_name], [last_name]) VALUES (1,&#39;Dave&#39;,&#39;Mustaine&#39;)&lt;br /&gt; INSERT INTO [hrschema].[emp] ([id], [first_name], [last_name]) VALUES (2,&#39;Chris&#39;,&#39;Broderick&#39;)&lt;br /&gt; INSERT INTO [hrschema].[emp] ([id], [first_name], [last_name]) VALUES (3,&#39;David&#39;,&#39;Ellefson&#39;)&lt;br /&gt; INSERT INTO [hrschema].[emp] ([id], [first_name], [last_name]) VALUES (4,&#39;Shawn&#39;,&#39;Drover&#39;)&lt;br /&gt; GO&lt;/span&gt;&lt;/div&gt;
First create a new query (by right-clicking on the database name and selecting &lt;strong&gt;New Query&lt;/strong&gt;). Then  paste-in the SQL text above and hit F5 to execute it.&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f4&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460268.png&quot; /&gt;&lt;/div&gt;
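Before moving on, you can optionally confirm that the four test rows made it into the table by running a quick count in the same query window:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SELECT COUNT(*) FROM [hrschema].[emp]&lt;br /&gt; GO&lt;/span&gt;&lt;/div&gt;
 The query should return 4.&lt;br /&gt;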
Now,
 in order for Oracle GoldenGate to be able to access the EMP database, 
you have to create an ODBC data source for it. Let&#39;s go to &lt;strong&gt;Control Panel -&amp;gt; Administrative Tools -&amp;gt; Data Sources (ODBC)&lt;/strong&gt; and add a new System DSN. Select &lt;strong&gt;SQL Server&lt;/strong&gt;
 as the database driver and name the data source HR. Point the data source to the local SQL Server (MSSQL) and fill in the login credentials. The data source summary should be similar to this:&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f5&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460269.png&quot; /&gt;&lt;/div&gt;
Now
 it&#39;s time to enable Oracle GoldenGate to acquire the transaction 
information for the EMP table from the transaction logs. Again you will 
be using GGSCI:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;strong&gt;ggsci.exe&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Oracle GoldenGate Command Interpreter for ODBC&lt;br /&gt; Version 11.1.1.0.0 Build 078&lt;br /&gt; Windows (optimized), Microsoft SQL Server on Jul 28 2010 18:55:52&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 1&amp;gt; &lt;strong&gt;DBLOGIN SOURCEDB HR&lt;/strong&gt;&lt;br /&gt; Successfully logged into database.&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 2&amp;gt; &lt;strong&gt;ADD TRANDATA HRSCHEMA.EMP&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Logging of supplemental log data is enabled for table hrschema.emp&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 3&amp;gt;&lt;/span&gt;&lt;/div&gt;
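If you want to double-check this step, GGSCI also provides the INFO TRANDATA command, which reports whether supplemental logging is enabled for a given table; you could run it at the same GGSCI prompt:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;&lt;strong&gt;INFO TRANDATA HRSCHEMA.EMP&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;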
Because
 the data types in Oracle and SQL Server are different you have to 
establish a data type conversion. GoldenGate provides a dedicated tool 
called DEFGEN that generates data definitions and is referenced by 
Oracle GoldenGate processes when source and target tables have 
dissimilar definitions. Before running DEFGEN you have to create a 
parameter file for it, specifying which tables the tool should inspect and where to place the type definitions file after the tables are inspected. You can create such a parameter file using the EDIT PARAMS command within GGSCI.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 3&amp;gt; &lt;strong&gt;EDIT PARAMS DEFGEN&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 4&amp;gt;&lt;/span&gt;&lt;/div&gt;
This
 creates an empty parameter file named DEFGEN.PRM and located in the 
DIRPRM folder of your GoldenGate installation. Put the following 
contents inside the file:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;defsfile c:\gg\dirdef\emp.def&lt;br /&gt; sourcedb hr&lt;br /&gt; table hrschema.emp;&lt;/span&gt;&lt;/div&gt;
The
 parameters are pretty self-explanatory. We want DEFGEN to inspect the EMP table inside the HRSCHEMA schema and to place a definitions file named EMP.DEF in the DIRDEF sub-directory. Let&#39;s invoke DEFGEN and examine its output.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;strong&gt;defgen paramfile c:\gg\dirprm\defgen.prm&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Oracle GoldenGate Table Definition Generator for ODBC&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Version 11.1.1.0.0 Build 078&lt;br /&gt; &amp;nbsp;&amp;nbsp; Windows (optimized), Microsoft SQL Server on Jul 28 2010 19:16:56&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Starting at 2011-04-08 14:41:06&lt;br /&gt; ***********************************************************************&lt;br /&gt; &lt;br /&gt; Operating System Version:&lt;br /&gt; Microsoft Windows XP Professional, on x86&lt;br /&gt; Version 5.1 (Build 2600: Service Pack 3)&lt;br /&gt; &lt;br /&gt; Process id: 2948&lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; **&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Running with the following parameters&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; **&lt;br /&gt; ***********************************************************************&lt;br /&gt; defsfile c:\gg\dirdef\emp.def&lt;br /&gt; sourcedb hr&lt;br /&gt; table hrschema.emp;&lt;br /&gt; Retrieving definition for HRSCHEMA.EMP&lt;br /&gt; &lt;br /&gt; Definitions generated for 1 tables in c:\gg\dirdef\emp.def&lt;br /&gt; &lt;/span&gt;&lt;br /&gt; &lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;/span&gt;&lt;/div&gt;
If you check the contents of EMP.DEF, you will see something similar to this:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;*&lt;br /&gt; * Definitions created/modified&amp;nbsp; 2011-07-07 10:27&lt;br /&gt; *&lt;br /&gt; *&amp;nbsp; Field descriptions for each column entry:&lt;br /&gt; *&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 1&amp;nbsp;&amp;nbsp;&amp;nbsp; Name&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 2&amp;nbsp;&amp;nbsp;&amp;nbsp; Data Type&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 3&amp;nbsp;&amp;nbsp;&amp;nbsp; External Length&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 4&amp;nbsp;&amp;nbsp;&amp;nbsp; Fetch Offset&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 5&amp;nbsp;&amp;nbsp;&amp;nbsp; Scale&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 6&amp;nbsp;&amp;nbsp;&amp;nbsp; Level&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 7&amp;nbsp;&amp;nbsp;&amp;nbsp; Null&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 8&amp;nbsp;&amp;nbsp;&amp;nbsp; Bump if Odd&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 9&amp;nbsp; &amp;nbsp;&amp;nbsp;Internal Length&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 10&amp;nbsp;&amp;nbsp;&amp;nbsp; Binary Length&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 11&amp;nbsp;&amp;nbsp;&amp;nbsp; Table Length&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 12&amp;nbsp;&amp;nbsp;&amp;nbsp; Most Significant DT&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 13&amp;nbsp;&amp;nbsp;&amp;nbsp; Least Significant DT&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 14&amp;nbsp;&amp;nbsp;&amp;nbsp; High Precision&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 15&amp;nbsp;&amp;nbsp;&amp;nbsp; Low Precision&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 16&amp;nbsp;&amp;nbsp;&amp;nbsp; Elementary Item&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 17&amp;nbsp;&amp;nbsp;&amp;nbsp; Occurs&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 18&amp;nbsp;&amp;nbsp;&amp;nbsp; Key Column&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp; 19 &amp;nbsp;&amp;nbsp;&amp;nbsp;Sub Data Type&lt;br /&gt; *&lt;br /&gt; *&lt;br /&gt; Definition for table HRSCHEMA.EMP&lt;br /&gt; Record length: 121&lt;br /&gt; Syskey: 0&lt;br /&gt; Columns: 3&lt;br /&gt; id&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 134&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 23&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&amp;nbsp; 0&amp;nbsp; 0 1 0&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 8&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 8&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 8 0 0 0 0 1&amp;nbsp;&amp;nbsp;&amp;nbsp; 0 1 0&lt;br /&gt; first_name&amp;nbsp;&amp;nbsp; 64&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 50&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 11&amp;nbsp; 0&amp;nbsp; 0 1 0&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 50&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 50&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0 0 0 0 0 1&amp;nbsp;&amp;nbsp;&amp;nbsp; 0 0 0&lt;br /&gt; last_name&amp;nbsp;&amp;nbsp;&amp;nbsp; 64&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 50&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 66&amp;nbsp; 0&amp;nbsp; 0 1 0&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 50&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 50&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0 0 0 0 0 1&amp;nbsp;&amp;nbsp;&amp;nbsp; 0 0 0&lt;br /&gt; End of definition&lt;/span&gt;&lt;/div&gt;
It basically lists all tables/columns and describes the native database types using more general definitions.&lt;br /&gt;
 Now you have to copy the EMP.DEF file to the target machine, as it must be available to the Replicat process. The Replicat will have to do another conversion: it will map the more general types back to database-specific types (but this time the types will correspond to the ones used by the target database). For copying the file you can use FTP/SFTP or SCP. (Personally, I use a free FTP/SFTP/SCP client called WinSCP to copy EMP.DEF from the Windows box to the /u01/app/oracle/gg/dirdef folder on the Linux machine.)&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&amp;nbsp;&lt;img alt=&quot;oracle-sqlserver-goldengate-f6&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460270.png&quot; /&gt;&lt;strong&gt;&lt;br /&gt; &lt;/strong&gt;&lt;/div&gt;
&lt;h3&gt;
Preparing the Target Database&lt;/h3&gt;
After the source preparations are finalized it&#39;s time to move to the 
target machine. Let&#39;s create a schema (GG_USER) and a table where the 
Replicat process can apply the transactions coming from the source. &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;[oracle@oradb ~]$ &lt;strong&gt;sqlplus / as sysdba&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; SQL*Plus: Release 11.2.0.1.0 Production on Fri Apr 8 14:11:49 2011&lt;br /&gt; &lt;br /&gt; Copyright (c) 1982, 2009, Oracle.&amp;nbsp; All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; Connected to:&lt;br /&gt; Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - Production&lt;br /&gt; With the Partitioning, OLAP, Data Mining and Real Application Testing options&lt;br /&gt; &lt;br /&gt; SQL&amp;gt; &lt;strong&gt;create user gg_user identified by welcome1;&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; User created.&lt;br /&gt; &lt;br /&gt; SQL&amp;gt; &lt;strong&gt;grant connect, resource,select any dictionary to gg_user;&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Grant succeeded.&lt;br /&gt; &lt;br /&gt; SQL&amp;gt;&lt;/span&gt;&lt;/div&gt;
The EMP table should reside in GG_USER&#39;s schema:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SQL&amp;gt; &lt;strong&gt;create table gg_user.emp (id number not null, first_name varchar2(50), last_name varchar2(50));&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Table created.&lt;br /&gt; &lt;br /&gt; SQL&amp;gt; &lt;/span&gt;&lt;/div&gt;
You
 have to keep in mind that, should the Replicat process apply data to tables residing in different schemas, GG_USER will need additional privileges (like SELECT ANY TABLE, LOCK ANY TABLE, etc.). The required privileges are listed in detail in the official documentation.&lt;br /&gt;
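For example, if GG_USER ever had to apply changes to tables in other schemas, grants along these lines would be needed (illustrative only; check the documentation for the exact list your configuration requires):&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SQL&amp;gt; &lt;strong&gt;grant select any table, insert any table, update any table, delete any table, lock any table to gg_user;&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;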
 &lt;h3&gt;
Setting Up the Extract &amp;amp; Replicat for Initial Data Load&lt;/h3&gt;
 Let&#39;s start by setting up the Extract process on the source machine. 
Name the process INEXT (for INitial EXTract). Next, create a parameter file in the same manner as the one you created for the DEFGEN utility. The filename will be INEXT.PRM.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;strong&gt;ggsci.exe&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Oracle GoldenGate Command Interpreter for ODBC&lt;br /&gt; Version 11.1.1.0.0 Build 078&lt;br /&gt; Windows (optimized), Microsoft SQL Server on Jul 28 2010 18:55:52&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 1&amp;gt; &lt;strong&gt;EDIT PARAMS INEXT&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;
Paste the following contents to INEXT.PRM:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SOURCEISTABLE&lt;br /&gt; SOURCEDB HR&lt;br /&gt; RMTHOST ORADB, MGRPORT 7809&lt;br /&gt; RMTFILE /u01/app/oracle/gg/dirdat/ex&lt;br /&gt; TABLE hrschema.emp;&lt;/span&gt;&lt;/div&gt;
The
 SOURCEISTABLE parameter instructs the Extract process to get the data 
directly from the table instead of the transaction logs. This is the 
behavior that we want in order to do a full extraction. SOURCEDB points 
to the database that contains the data. RMTHOST and MGRPORT specify the 
remote machine and Manager&#39;s port. RMTFILE specifies the file to which 
the extracted data will be written.&lt;br /&gt;
 That&#39;s all the configuration 
you need for the initial data extraction. Let&#39;s move to the Linux 
machine and configure the initial data loading.&lt;br /&gt;
 You have to deal with the Manager process first: Start GGSCI and create a parameter file called MGR.PRM.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;[oracle@oradb gg]$ &lt;strong&gt;./ggsci &lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Oracle GoldenGate Command Interpreter for Oracle&lt;br /&gt; Version 11.1.1.0.0 Build 078&lt;br /&gt; Linux, x86, 32bit (optimized), Oracle 11 on Jul 28 2010 13:22:25&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;/span&gt;&lt;br /&gt; &lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (oradb) 1&amp;gt; &lt;strong&gt;EDIT PARAM MGR&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;
There is only one line that you have to put in MGR.PRM:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;PORT 7809&lt;/span&gt;&lt;/div&gt;
After saving the file execute the START MANAGER command within GGSCI and see if the manager starts correctly.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (oradb) 2&amp;gt; &lt;strong&gt;START MANAGER&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Manager started.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 3&amp;gt; &lt;/span&gt;&lt;/div&gt;
Next
 you have to set the parameters for the Replicat process. So create a 
new parameters file and name it INLOAD (for INitial LOADing).&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (oradb) 3&amp;gt; &lt;strong&gt;EDIT PARAMS INLOAD&lt;/strong&gt;&lt;/span&gt;&lt;/div&gt;
Put the following contents inside INLOAD.PRM:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SPECIALRUN&lt;br /&gt; END RUNTIME&lt;br /&gt; USERID gg_user, PASSWORD welcome1&lt;br /&gt; EXTFILE /u01/app/oracle/gg/dirdat/ex&lt;br /&gt; SOURCEDEFS /u01/app/oracle/gg/dirdef/emp.def&lt;br /&gt; MAP hrschema.emp, TARGET gg_user.emp;&lt;/span&gt;&lt;/div&gt;
The
 SPECIALRUN parameter defines an initial-loading process (it is a 
one-time loading that doesn&#39;t use checkpoints). The next line of the 
file instructs the Replicat process to terminate after the loading is 
finished. &lt;br /&gt; &lt;br /&gt; Next you provide the database user and password, the
 extract file, and the table definition. The final parameter, MAP, 
instructs the Replicat to remap the table HRSCHEMA.EMP to GG_USER.EMP.&lt;br /&gt;
 &lt;h3&gt;
&lt;strong&gt;Running the Initial Extract and Loading&lt;/strong&gt;&lt;/h3&gt;
The
 databases and processes are finally configured. Now you can start the 
initial loading and see the data replication in action.&lt;br /&gt;
 First you have to run the Extract process; it will fetch all data residing in the SQL Server&#39;s EMP table and write it to the RMTFILE (/u01/app/oracle/gg/dirdat/ex) on the Linux host.&lt;br /&gt;
 Start the Extract by running the EXTRACT command and providing parameters and log file as command line arguments.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;C:\GG&amp;gt;&lt;strong&gt;extract paramfile dirprm\inext.prm reportfile dirrpt\inext.rpt&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Oracle GoldenGate Capture for ODBC&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Version 11.1.1.0.0 Build 078&lt;br /&gt; &amp;nbsp;&amp;nbsp; Windows (optimized), Microsoft SQL Server on Jul 28 2010 19:22:00&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Starting at 2011-04-08 15:57:48&lt;br /&gt; ***********************************************************************&lt;br /&gt; &lt;br /&gt; Operating System Version:&lt;br /&gt; Microsoft Windows XP Professional, on x86&lt;br /&gt; Version 5.1 (Build 2600: Service Pack 3)&lt;br /&gt; &lt;br /&gt; Process id: 556&lt;br /&gt; &lt;br /&gt; Description:&lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; **&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;Running with the following parameters&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; **&lt;br /&gt; ***********************************************************************&lt;br /&gt; &lt;br /&gt; 2011-04-08 15:57:48&amp;nbsp; INFO&amp;nbsp;&amp;nbsp;&amp;nbsp; OGG-01017&amp;nbsp; Wildcard resolution set to IMMEDIATE bec&lt;br /&gt; ause SOURCEISTABLE is used.&lt;br /&gt; &lt;br /&gt; Using the following key columns for source table HRSCHEMA.EMP: id.&lt;br /&gt; &lt;br /&gt; CACHEMGR virtual memory values (may have been adjusted)&lt;br /&gt; CACHEBUFFERSIZE:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 64K&lt;br /&gt; CACHESIZE:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 1G&lt;br /&gt; CACHEBUFFERSIZE (soft max):&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 4M&lt;br /&gt; CACHEPAGEOUTSIZE (normal):&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 4M&lt;br /&gt; PROCESS VM AVAIL FROM OS (min):&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 1.85G&lt;br /&gt; CACHESIZEMAX (strict force to disk):&amp;nbsp;&amp;nbsp; 
1.62G&lt;br /&gt; &lt;br /&gt; Database Version:&lt;br /&gt; Microsoft SQL Server&lt;br /&gt; Version 10.00.1600&lt;br /&gt; ODBC Version 03.52.0000&lt;br /&gt; &lt;br /&gt; Driver Information:&lt;br /&gt; SQLSRV32.DLL&lt;br /&gt; Version 03.85.1132&lt;br /&gt; ODBC Version 03.52&lt;br /&gt; &lt;br /&gt; Database Language and Character Set:&lt;br /&gt; &lt;br /&gt; Warning: Unable to determine the application and database codepage settings.&lt;br /&gt; Please refer to user manual for more information.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; 2011-04-08 15:57:49&amp;nbsp; INFO&amp;nbsp;&amp;nbsp;&amp;nbsp; OGG-01478&amp;nbsp; Output file /u01/app/oracle/gg/dirdat/ex&lt;br /&gt; &amp;nbsp;is using format RELEASE 10.4/11.1.&lt;br /&gt; &lt;br /&gt; 2011-04-08 15:57:55&amp;nbsp; INFO&amp;nbsp;&amp;nbsp;&amp;nbsp; OGG-01226&amp;nbsp; Socket buffer size set to 27985 (flush s&lt;br /&gt; ize 27985).&lt;br /&gt; &lt;br /&gt; Processing table HRSCHEMA.EMP&lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; *&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; ** Run Time Statistics **&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; *&lt;br /&gt; ***********************************************************************&lt;br /&gt; &lt;br /&gt; Report at 2011-04-08 15:57:55 (activity since 2011-04-08 15:57:49)&lt;br /&gt; &lt;br /&gt; Output to /u01/app/oracle/gg/dirdat/ex:&lt;br /&gt; &lt;br /&gt; From Table HRSCHEMA.EMP:&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; inserts:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 4&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; updates:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; deletes:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; discards:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; C:\GG&amp;gt; &lt;/span&gt;&lt;/div&gt;
The run-time statistics show that 4 rows were successfully extracted. Let&#39;s move to the Linux machine and start the Replicat. &lt;br /&gt; &lt;br /&gt;
 To apply the extracted data to the target database, run the replicat 
command and provide the prepared parameters file. Here is an excerpt 
from the replicat run:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;[oracle@oradb gg]$ &lt;strong&gt;./replicat paramfile dirprm/inload.prm&lt;/strong&gt; &lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Oracle GoldenGate Delivery for Oracle&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Version 11.1.1.0.0 Build 078&lt;br /&gt; &amp;nbsp;&amp;nbsp; Linux, x86, 32bit (optimized), Oracle 11 on Jul 28 2010 15:42:30&lt;br /&gt; &lt;br /&gt; Copyright (C) 1995, 2010, Oracle and/or its affiliates. All rights reserved.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Starting at 2011-04-11 12:52:52&lt;br /&gt; ***********************************************************************&lt;br /&gt; &lt;br /&gt; Operating System Version:&lt;br /&gt; Linux&lt;br /&gt; Version #1 SMP Mon Mar 29 20:06:41 EDT 2010, Release 2.6.18-194.el5&lt;br /&gt; Node: oradb&lt;br /&gt; Machine: i686&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; soft limit&amp;nbsp;&amp;nbsp; hard limit&lt;br /&gt; Address Space Size&amp;nbsp;&amp;nbsp; :&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&lt;br /&gt; Heap Size&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; :&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&lt;br /&gt; File Size&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; :&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&lt;br /&gt; CPU Time&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; :&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&amp;nbsp;&amp;nbsp;&amp;nbsp; unlimited&lt;br /&gt; &lt;br /&gt; Process id: 23383&lt;br /&gt; &lt;br /&gt; Description: &lt;br /&gt; &lt;br /&gt; ***********************************************************************&lt;br /&gt; **&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; Running with the following parameters&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; **&lt;br /&gt; ***********************************************************************&lt;br /&gt; SPECIALRUN&lt;br /&gt; END RUNTIME&lt;br /&gt; USERID gg_user, PASSWORD ********&lt;br /&gt; EXTFILE /u01/app/oracle/gg/dirdat/ex&lt;br /&gt; SOURCEDEFS /u01/app/oracle/gg/dirdef/emp.def&lt;br /&gt; MAP hrschema.emp, TARGET gg_user.emp;&lt;br /&gt; &lt;br /&gt; CACHEMGR virtual memory values (may have been adjusted)&lt;br /&gt; 
CACHEBUFFERSIZE:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 64K&lt;br /&gt; CACHESIZE:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 512M&lt;br /&gt; CACHEBUFFERSIZE (soft max):&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 4M&lt;br /&gt; CACHEPAGEOUTSIZE (normal):&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;4M&lt;br /&gt; PROCESS VM AVAIL FROM OS (min):&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 1G&lt;br /&gt; CACHESIZEMAX (strict force to disk):&amp;nbsp;&amp;nbsp;&amp;nbsp; 881M&lt;br /&gt; &lt;br /&gt; Database Version:&lt;br /&gt; Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - Production&lt;br /&gt; PL/SQL Release 11.2.0.1.0 - Production&lt;br /&gt; CORE 11.2.0.1.0 Production&lt;br /&gt; TNS for Linux: Version 11.2.0.1.0 - Production&lt;br /&gt; NLSRTL Version 11.2.0.1.0 - Production&lt;br /&gt; &lt;br /&gt; ...&lt;br /&gt; &lt;br /&gt; Reading /u01/app/oracle/gg/dirdat/ex, current RBA 1210, 4 records&lt;br /&gt; &lt;br /&gt; Report at 2011-04-11 12:53:15 (activity since 2011-04-11 12:53:14)&lt;br /&gt; &lt;br /&gt; From Table HRSCHEMA.EMP to GG_USER.EMP:&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; inserts:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 4&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; updates:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; deletes:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; #&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; discards:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 0&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; Last log location read:&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; FILE:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; /u01/app/oracle/gg/dirdat/ex&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; RBA:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 1210&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; TIMESTAMP: 
2011-04-08 16:57:55.433993&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; EOF:&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; NO&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; READERR:&amp;nbsp;&amp;nbsp; 400&lt;br /&gt; &lt;br /&gt; ...&lt;br /&gt; &lt;br /&gt; [oracle@oradb gg]$ &lt;/span&gt;&lt;/div&gt;
You can log in to the Oracle Database as GG_USER and check the contents of the EMP table.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SQL&amp;gt; select id, first_name from emp;&lt;br /&gt; &lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; ID FIRST_NAME&lt;br /&gt; ---------- --------------------------------------------------&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;1 Dave&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;2 Chris&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;3 David&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;4 Shawn&lt;br /&gt; &lt;br /&gt; SQL&amp;gt;&lt;/span&gt;&lt;/div&gt;
The EMP table now contains a copy of all records that were originally inserted at the SQL Server.&lt;br /&gt;
 &lt;h3&gt;
Live Data Capture Configuration&lt;/h3&gt;
With the Oracle database now holding an exact copy of the SQL Server&#39;s EMP table, it is time to create a live capture configuration. We will set up the Extract and Replicat processes to run all the time and continuously transmit and apply changes to the EMP table.&lt;br /&gt;
 In order to implement the new configuration you will have to create new parameter files for extracting and replicating. First, however, you have to perform two additional steps on SQL Server: confirm that the database is set to the Full recovery model, and then take a full database backup of the EMP database. Failure to take a full backup will prevent the Extract process from capturing live data changes.&lt;br /&gt;
 You can easily check if the EMP database is in Full Recovery by right-clicking on it, selecting &lt;strong&gt;Properties&lt;/strong&gt;, and inspecting the value of Recovery model.&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f7&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460271.png&quot; /&gt;&lt;/div&gt;
Taking a full backup is done in a few clicks as well. Right-click on the EMP database, select &lt;strong&gt;Tasks&lt;/strong&gt; and then &lt;strong&gt;Back Up&lt;/strong&gt;. This brings up the backup database dialog. We confirm that the Backup type is set to &lt;strong&gt;Full&lt;/strong&gt; and then click &lt;strong&gt;OK&lt;/strong&gt;.&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f8&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460272.png&quot; /&gt;&lt;/div&gt;
If everything goes well, in a couple of seconds we should see a notification that the operation was successful.&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f9&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460273.png&quot; /&gt;&lt;/div&gt;
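 If you prefer scripting to the GUI, both steps can also be done with a few lines of T-SQL. The sketch below is illustrative only: the database name (EMP, as used in the backup step above) and the backup path are assumptions that may differ in your environment.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;-- check the current recovery model&lt;br /&gt; SELECT name, recovery_model_desc FROM sys.databases WHERE name = &#39;EMP&#39;;&lt;br /&gt; &lt;br /&gt; -- switch to full recovery if it is not set already&lt;br /&gt; ALTER DATABASE EMP SET RECOVERY FULL;&lt;br /&gt; &lt;br /&gt; -- take a full database backup (the target path is illustrative)&lt;br /&gt; BACKUP DATABASE EMP TO DISK = &#39;C:\GG\EMP_full.bak&#39; WITH INIT;&lt;/span&gt;&lt;/div&gt;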
Time to set up the processes. We will start by configuring a Manager process on the Windows machine. We skipped this step in the initial loading phase, but in the new configuration that you are building the Extract process must be running all the time. This requires an active Manager process that will perform resource management functions. You will follow the same steps as with the Linux box configuration.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 1&amp;gt; &lt;strong&gt;EDIT PARAM MGR&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 2&amp;gt;&lt;/span&gt;&lt;/div&gt;
Put a single line in MGR.PRM to set the port of the Manager instance.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;PORT 7809&lt;/span&gt;&lt;/div&gt;
Then we start the Manager.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 2&amp;gt; &lt;strong&gt;START MANAGER&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Starting Manager as service (&#39;GGSMGR&#39;)...&lt;br /&gt; Service started.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 3&amp;gt;&lt;/span&gt;&lt;/div&gt;
Let&#39;s
 create a new extract group for mining the transaction logs and name it 
MSEXT. Then set a destination where the data changes should be written 
(/u01/app/oracle/gg/dirdat/ms).&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 3&amp;gt; &lt;strong&gt;ADD EXTRACT MSEXT, TRANLOG, BEGIN NOW&lt;/strong&gt;&lt;br /&gt; EXTRACT added.&lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 4&amp;gt; &lt;strong&gt;ADD RMTTRAIL /u01/app/oracle/gg/dirdat/ms, EXTRACT MSEXT&lt;/strong&gt;&lt;br /&gt; RMTTRAIL added.&lt;/span&gt;&lt;/div&gt;
You will also need a new parameters file.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 5&amp;gt; &lt;strong&gt;EDIT PARAMS MSEXT&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 6&amp;gt;&lt;/span&gt;&lt;/div&gt;
Type the following lines in it:&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;EXTRACT MSEXT&lt;br /&gt; SOURCEDB HR&lt;br /&gt; TRANLOGOPTIONS MANAGESECONDARYTRUNCATIONPOINT&lt;br /&gt; RMTHOST ORADB, MGRPORT 7809&lt;br /&gt; RMTTRAIL /u01/app/oracle/gg/dirdat/ms&lt;br /&gt; TABLE HRSCHEMA.EMP;&lt;/span&gt;&lt;/div&gt;
The difference here is that we are omitting the SOURCEISTABLE parameter and introducing a new one: TRANLOGOPTIONS MANAGESECONDARYTRUNCATIONPOINT. This option tells the Extract process to routinely check and delete the CDC capture job, resulting in better performance and less space occupied by captured data.&lt;br /&gt;
 This is all you need on the source machine. Let&#39;s move on and configure the replication at the target.&lt;br /&gt;
 On the Linux box you have to start by creating a checkpoint table. Checkpoints are used to store the current read/write positions of the Extract and Replicat processes. They prevent loss of data and ensure that the processes can recover from faults (for example, if the network between the source and target machines goes down for a moment). Create a table that holds checkpoint information by issuing the ADD CHECKPOINTTABLE command at the target.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (oradb) 1&amp;gt; &lt;strong&gt;DBLOGIN USERID gg_user, PASSWORD welcome1&lt;/strong&gt;&lt;br /&gt; Successfully logged into database.&lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 2&amp;gt; &lt;strong&gt;ADD CHECKPOINTTABLE gg_user.chkpt&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Successfully created checkpoint table GG_USER.CHKPT.&lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 3&amp;gt;&lt;/span&gt;&lt;/div&gt;
Let&#39;s add a Replicat group and set up its parameters.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (oradb) 3&amp;gt; &lt;strong&gt;ADD REPLICAT MSREP, EXTTRAIL /u01/app/oracle/gg/dirdat/ms, CHECKPOINTTABLE gg_user.chkpt&lt;/strong&gt;&lt;br /&gt; REPLICAT added.&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 4&amp;gt; &lt;strong&gt;EDIT PARAMS MSREP&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 5&amp;gt;&lt;/span&gt;&lt;/div&gt;
 As a final step, put the following lines in MSREP.PRM.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;REPLICAT MSREP&lt;br /&gt; SOURCEDEFS /u01/app/oracle/gg/dirdef/emp.def&lt;br /&gt; USERID gg_user, PASSWORD welcome1&lt;br /&gt; MAP hrschema.emp, TARGET gg_user.emp;&lt;/span&gt;&lt;/div&gt;
The configuration is now completed. Let&#39;s start the Extract and Replicat and do some testing.&lt;br /&gt;
 &lt;h3&gt;
Starting and Testing Online Transaction Replication&lt;/h3&gt;
To start the Extract process, use GGSCI and execute the &lt;span style=&quot;font-family: Courier New;&quot;&gt;START EXTRACT&lt;/span&gt; command.&amp;nbsp; &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (MSSQL) 1&amp;gt; &lt;strong&gt;START EXTRACT MSEXT&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Sending START request to MANAGER (&#39;GGSMGR&#39;) ...&lt;br /&gt; EXTRACT MSEXT starting&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (MSSQL) 2&amp;gt;&lt;/span&gt;&lt;/div&gt;
 On the Linux machine, use the &lt;span style=&quot;font-family: Courier New;&quot;&gt;START REPLICAT&lt;/span&gt; command.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;GGSCI (oradb) 1&amp;gt; &lt;strong&gt;START REPLICAT MSREP&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; Sending START request to MANAGER ...&lt;br /&gt; REPLICAT MSREP starting&lt;br /&gt; &lt;br /&gt; &lt;br /&gt; GGSCI (oradb) 2&amp;gt; &lt;/span&gt;&lt;/div&gt;
Let&#39;s login as GG_USER and see the contents of the EMP table.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SQL&amp;gt; &lt;strong&gt;select id, first_name from emp;&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; ID FIRST_NAME&lt;br /&gt; ---------- --------------------------------------------------&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;1 Dave&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;2 Chris&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;3 David&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;4 Shawn&lt;br /&gt; &lt;br /&gt; SQL&amp;gt;&lt;/span&gt;&lt;/div&gt;
Nothing new here. The data hasn&#39;t changed since the last time we checked. Let&#39;s go back to the SQL Server machine and run the following query, adding one additional row to the EMP table at the source.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;BEGIN TRAN&lt;br /&gt; INSERT INTO [hrschema].[emp] ([id], [first_name], [last_name]) VALUES (9,&#39;Gar&#39;,&#39;Samuelson&#39;)&lt;br /&gt; COMMIT TRAN&lt;/span&gt;&lt;/div&gt;
&lt;div style=&quot;text-align: center;&quot;&gt;
&lt;img alt=&quot;oracle-sqlserver-goldengate-f10&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/460274.png&quot; /&gt;&lt;/div&gt;
Let&#39;s go back to the Oracle Database and see if anything changed there.&lt;br /&gt;
 &lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;SQL&amp;gt; &lt;strong&gt;select id, first_name from emp;&lt;/strong&gt;&lt;br /&gt; &lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; ID FIRST_NAME&lt;br /&gt; ---------- --------------------------------------------------&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;1 Dave&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;2 Chris&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;3 David&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;4 Shawn&lt;br /&gt; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;9 Samuelson&lt;br /&gt; &lt;br /&gt; SQL&amp;gt;&lt;/span&gt;&lt;/div&gt;
Congratulations! The data is getting replicated in a sub-second interval, reflecting every single transaction.&lt;br /&gt;
 &lt;h3&gt;
Conclusion&lt;/h3&gt;
In this article we performed a very basic demonstration of some of the 
Oracle GoldenGate features. You should be aware that there are many 
different topologies and usage scenarios. For instance, you can 
configure GoldenGate to perform bidirectional replication (where two 
different databases simultaneously replicate changes to each other). 
There are also broadcast (where a single database replicates to multiple
 targets) and consolidation (many databases replicate to a central 
database) configurations. One can use GoldenGate to implement query 
offloading (separating reporting from production, but avoiding the time 
gap of the traditional data warehouses). It is also a powerful solution 
for implementing zero downtime upgrades and database migrations. &lt;br /&gt; &lt;br /&gt; &lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/4363410073422322770/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/replicating-transactions-between.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/4363410073422322770'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/4363410073422322770'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/replicating-transactions-between.html' title='Replicating Transactions Between Microsoft SQL Server and Oracle Database Using Oracle GoldenGate'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-2537778781202755155</id><published>2012-10-07T12:42:00.003+05:30</published><updated>2012-10-07T12:42:59.348+05:30</updated><title type='text'>Legions of remote workers create opportunities and concerns for businesses.</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
It’s no longer unusual for employees to take the office with them, 
whether that means on the road, at home or just in their pockets. Mobile devices have
 allowed many people to unplug from their workstations and access 
company resources wherever there’s an Internet connection, creating a 
decentralized work force that’s running at all hours.&lt;br /&gt;

This shift in business culture is made possible by technological 
advances. Improved functionalities such as faster processing speed, GPS capability,
 larger memory stores and robust applications make it increasingly 
realistic for organizations to have a legion of remote workers and to 
offer more flexible work schedules.&lt;br /&gt;

The potential benefits to businesses are many. Greater mobility can 
help with employee retention as more people like to be untethered, but it also helps an enterprise stay nimble by being “always on.” 
Questions can be answered at any hour, which is a boon in a global 
economy where customers might be 12 time zones away. Having fewer 
in-office staff members can also reduce expenses as organizations 
downsize their office space.&lt;br /&gt;

Such advantages have led companies to increase their mobile 
capabilities and turn more of their cubicle dwellers into on-the-go 
employees. According to IDC’s “Worldwide Mobile Worker Population 
2007-2011 Forecast,” this demographic could exceed 1 billion, or 30% of 
the global work force, by 2011. This is up from 758.6 million in 2006.&lt;br /&gt;

Many staff members remotely access information from a data warehouse,
 since an application can “push” the data to a mobile device. Likewise, 
content, including reports, calculations, charts, sales contacts and 
schedules, can be pushed back from the employee. Remote workers don’t 
have to physically sync up with an office machine for data warehouse 
information to be updated, creating an environment in which data can be 
refreshed quickly.&lt;br /&gt;

&lt;h3&gt;
Change brings challenges&lt;/h3&gt;
As with any major change in doing business, there are challenges, 
particularly for executives who need to keep pace with technology while 
keeping management fundamentals in place. It isn’t enough to give 
employees a laptop and send them home to work. To tap into the promise 
of mobility, leadership must implement a comprehensive strategy that 
includes training, goal setting and support services.&lt;br /&gt;

Although technical issues must be addressed—such as creating a strong
 technical support infrastructure and making sure in-house applications 
function on devices—for many organizations, the greatest challenges may 
be in developing an effective management framework that harnesses 
mobility’s power without creating a fractured, unmanaged work force.&lt;br /&gt;

“You need to be able to trust employees when they’re out of the 
office, but the employees also have to trust you—that you’re putting 
enough structure in place that it supports them,” says Scott Morrison, 
an analyst at Gartner. “If you have strong management practices that 
take advantage of the best aspects of telework, rather than simply 
policing employee use, then a remote work force can be a huge company 
advantage.”&lt;br /&gt;

&lt;h3&gt;
Stay connected&lt;/h3&gt;
Some traditional management tactics will always be relevant, no 
matter where employees are based. Strategies like rewarding 
productivity, evaluating progress and setting goals are standard in any 
industry and every department. But when they extend to remote workers, 
those policies can create unique challenges.&lt;br /&gt;

Perhaps one of the most common difficulties relates to the 
manager/employee connection. Although text messages and e-mails may be 
frequent, the lack of personal contact can create a sense of distance, 
Morrison says. Also, some remote workers may be on different schedules, 
since one of the advantages of mobile technology is the ability to work 
at any hour in any time zone, and this can affect reaction time to 
pressing issues.&lt;br /&gt;

Another consideration is that not all employees are well-suited to 
this work style, especially if they’re completely mobile and lack an 
in-office space. “The trouble many times is that people who telework 
suffer from isolation,” Morrison says. “They don’t get the decompression
 of the water-cooler chat, and for many personality types, that’s a 
problem. People that might be extremely creative and productive in the 
office suddenly find themselves adrift at sea when they work from home.”&lt;br /&gt;

Even when employees are enthusiastic about working remotely, defining
 and tracking productivity can be a sticking point for managers. 
Traditionally, companies use workplace attendance as one measure of 
productivity, but with mobile employees, other factors such as output 
must replace that metric.&lt;br /&gt;

&lt;div class=&quot;inline-quote-right&quot;&gt;
&quot;It helps to have a trial period of 
six months to create best practices around a remote work strategy. It’s 
important to have a structured approach in launching a system.&quot;&lt;br /&gt;

&lt;div class=&quot;byline&quot;&gt;
-Scott Morrison, Gartner&lt;/div&gt;
&lt;/div&gt;
&lt;h3&gt;
Technical support&lt;/h3&gt;
Technical aspects must be addressed as well. Data management, for 
example, can be tricky, because it requires greater security controls, 
policies for sharing and access, and well-articulated principles about 
data collection.&lt;br /&gt;

An organization must also ensure security and privacy if workers are 
to access company data remotely, and this requires the input of IT with 
technical resources. It also necessitates contributions from other 
departments, such as human resources, to make sure employees are 
following directives about technology use. Job candidates should even be
 screened for mobile-friendly personality traits, such as being a 
self-starter.&lt;br /&gt;

A remote work force relies heavily on the data warehouse, Morrison 
notes, since that repository of organizational information is crucial 
for an employee’s daily operations. Therefore, managers need greater 
data warehouse functionality and reporting to determine productivity 
levels, craft projects that might extend across departments, and produce
 detailed analysis and reporting. Potholes in the highway between remote
 workers and the data warehouse can significantly slow business 
intelligence (BI) efforts and hinder productivity.&lt;br /&gt;

&lt;h3&gt;
Management strategies&lt;/h3&gt;
The first step in developing cohesive mobile policies is the creation
 of a “mobile center,” Morrison advises. This might involve hiring one 
person to direct remote working efforts, but it will more likely be a 
multi-department committee that meets regularly to look at what types of
 short-term and long-term goals are being met through the use of mobile 
technology.&lt;br /&gt;

This centralized team should train managers in developing effective 
definitions of productivity and revisit those parameters often to make 
sure they’re realistic. Morrison notes that productivity in a remote 
work force often is measured through specific deadlines and results that
 are reported frequently, sometimes on a weekly basis.&lt;br /&gt;

“Typically, it helps to have a trial period of six months to create 
best practices around a remote work strategy,” he says. “It’s important 
to have a structured approach in launching a system, and for that, you 
need a telework center of excellence.”&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/2537778781202755155/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/legions-of-remote-workers-create.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2537778781202755155'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2537778781202755155'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/legions-of-remote-workers-create.html' title='Legions of remote workers create opportunities and concerns for businesses.'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-8350683227668002782</id><published>2012-10-07T12:40:00.001+05:30</published><updated>2012-10-07T12:41:29.607+05:30</updated><title type='text'>Enterprises seek to improve access to electronic data for legal cases.</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Imagine a multinational manufacturing company being asked to search every nook and cranny of its disaster recovery backup
 tapes for documents containing hundreds of keyword search terms. The 
company spends millions of dollars conducting a search of its databases,
 only to face a multimillion-dollar fine for failing to produce all of 
the necessary documents.&lt;br /&gt;
While that manufacturing company is fictional, an increasing number 
of enterprises are confronting this very real challenge: e-discovery, 
the process through which electronic data is requested, found, secured 
and searched to be used as evidence in a court of law. E-mail, instant 
message logs, PowerPoint presentations and tweets on Twitter are among 
the data that can be called into evidence in U.S. court cases because of
 a December 2006 amendment to the U.S. Federal Rules of Civil Procedure 
to encompass electronically stored information.&lt;br /&gt;
“The bottom line is: Whether it’s e-mail, text messaging or some 
other Web 2.0 technology, everything is grist for the e-discovery mill,”
 warns Jason R. Baron, director of litigation at the U.S. National 
Archives and Records Administration, the agency responsible for 
preserving all of the documents and materials created by the federal 
government.&lt;br /&gt;
With U.S. courts empowered to order companies to quickly produce the 
right data, organizations must preserve and be prepared to examine 
mounds of electronic data with the precision of a forensics team. 
Failure to produce the right records can expose a company to legal 
fines, unfavorable judgments, increased operating costs and a tarnished 
corporate reputation.&lt;br /&gt;
These risks should serve as a wake-up call for lawyers and IT 
professionals alike, many of whom maintain a manual, ad&amp;nbsp;hoc approach to 
e-discovery in response to litigation and regulatory inquiries. Too many
 businesses rely on a hodgepodge of technologies to reactively identify 
and report relevant content and data.&lt;br /&gt;
&lt;h3&gt;
It’s now or never&lt;/h3&gt;
With data volumes at companies rapidly expanding, the time is ripe 
for organizations to view all electronic documents, no matter how 
seemingly insignificant, as critical assets that must be managed 
strategically. Fortunately, companies can take steps to better comply 
with the rules of e-discovery and minimize their risk of exposure to 
legal fines and burdensome operating costs. Here are some strategies to 
consider:&lt;br /&gt;
&lt;div class=&quot;inline-quote-right&quot;&gt;
With U.S. courts empowered to order 
companies to quickly produce the right data, organizations must preserve
 and be prepared to examine mounds of electronic data with the precision
 of a forensics team.&lt;/div&gt;
&lt;ul&gt;
&lt;li&gt;Collaborate. The cafeteria isn’t the only place a company’s IT 
employees and legal experts should cross paths. Techies and lawyers must
 work together to develop a strategy for saving and storing electronic 
documents, as well as making them readily accessible. “Lawyers, IT 
people and records managers need to sit together and decide who the 
custodians of data are before a crisis hits,” says Baron.&lt;/li&gt;
&lt;li&gt;Create standardized policies and IT practices. Don’t just leave your
 data retrieval processes to chance. Companies need to create 
standardized policies and IT practices for identifying, storing and 
collecting data, Baron advises. These policies and practices should be 
enforced enterprise-wide for a consistent e-discovery strategy.&lt;/li&gt;
&lt;li&gt;Establish an interdepartmental knowledge council. One of the 
smartest ways to avoid finger-pointing when a crisis erupts is to create
 a team of go-to people—department heads who take on the responsibility 
of overseeing the technical and legal requirements of e-discovery on an 
ongoing basis. Consisting of IT, human resources, accounting and legal 
representatives, this team pools resources and knowledge to set 
e-discovery policies and procedures as well as update one another on 
trends and developments in their respective fields.&lt;/li&gt;
&lt;li&gt;Purge regularly. Mergers, acquisitions, Web 2.0 technologies and 
legacy systems can result in a mountain of antiquated and unnecessary 
data that is nearly impossible to sift through. To avoid such a fate, 
companies would be wise to establish clear-cut policies on what records 
must be kept, how they should be stored and for how long. “Many 
corporations and institutions do not have a handle on what data each 
employee has stored in his or her account, so there’s all this knowledge
 that’s not being properly bundled or stored,” says Baron.&lt;/li&gt;
&lt;li&gt;Evangelize e-discovery. The urgency of readily retrieving electronic
 data for legal purposes shouldn’t be a secret. Companies need to inform
 employees of the importance of preserving e-mail and educate them about
 an organization’s potential exposure to fines and increased operating 
costs associated with poor records management. Seminars, webinars and 
guest speakers can help educate employees on e-discovery and minimize 
litigation risks.&lt;/li&gt;
&lt;li&gt;Put technology to use. Of course, people and processes are only a 
part of preparing for e-discovery. Powerful technology used for data 
warehousing, business intelligence (BI), e-mail archiving and records 
management can also help structure data collections for easy storage and
 retrieval. A master data management solution, for example, can provide a
 single view of an enterprise’s data, thereby improving the availability
 of high-quality information at the right time. Similarly, the right 
data backup and restoration tools can help companies get a better handle
 on the exponential growth of their data volumes as well as ensure the 
long-term survival of data in case of e-discovery needs.&lt;/li&gt;
&lt;/ul&gt;
&lt;h3&gt;
Case closed&lt;/h3&gt;
In today’s litigious world, companies must be ready and willing to 
swiftly hand over all electronic documents in a legal case. After all, 
Baron says, “any particular e-mail can be pulled out of context and used
 as evidence in litigation.” But retrieving the right information 
quickly doesn’t have to feel like searching for a needle in a virtual 
haystack. Interdepartmental collaboration, standardized policies and 
robust technology tools can help ease the process, enabling companies to
 produce all relevant records and avoid legal entanglements—because 
real-world businesses can’t afford to ignore the data challenges of 
e-discovery.&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/8350683227668002782/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/enterprises-seek-to-improve-access-to.html#comment-form' title='1 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/8350683227668002782'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/8350683227668002782'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/enterprises-seek-to-improve-access-to.html' title='Enterprises seek to improve access to electronic data for legal cases.'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>1</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-6530227829947756822</id><published>2012-10-07T12:38:00.001+05:30</published><updated>2012-10-07T12:39:09.436+05:30</updated><category scheme="http://www.blogger.com/atom/ns#" term="COGNOS"/><category scheme="http://www.blogger.com/atom/ns#" term="Data Cleansing"/><category scheme="http://www.blogger.com/atom/ns#" term="data mirror"/><category scheme="http://www.blogger.com/atom/ns#" term="DBA"/><category scheme="http://www.blogger.com/atom/ns#" term="DTS"/><category scheme="http://www.blogger.com/atom/ns#" term="ETL"/><category scheme="http://www.blogger.com/atom/ns#" term="IBM"/><category scheme="http://www.blogger.com/atom/ns#" term="OLAP"/><category scheme="http://www.blogger.com/atom/ns#" term="Oracle"/><category scheme="http://www.blogger.com/atom/ns#" term="SAS"/><title type='text'>Data Cleansing for Data Warehousing</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
How important is Extract, Transform, Load (ETL) to data Warehousing?&lt;br /&gt;
&lt;br /&gt;
Data cleansing and data warehousing go together like politicians and fundraising: there is almost no likelihood of one existing without the other. Data cleansing is often the most time-intensive, and contentious, process in data warehousing projects.&lt;br /&gt;
&lt;br /&gt;
&lt;span style=&quot;font-size: medium;&quot;&gt;What is Data Cleansing?&lt;/span&gt;&lt;br /&gt;
The elevator pitch: &quot;Data cleansing ensures that undecipherable data 
does not enter the data warehouse. Undecipherable data will affect 
reports generated from the data warehouse via OLAP, Data Mining and 
KPI&#39;s.&quot;&lt;br /&gt;
A very simple example of where data cleansing would be utilized is 
how dates are stored in separate applications. Example: 11th March 2007 
can be stored as &#39;03/11/07&#39; or &#39;11/03/07&#39; among other formats. A data 
warehousing project would require the different date formats to be 
transformed to a uniform standard before being entered in the data 
warehouse.&lt;br /&gt;
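As a rough sketch of such a transformation (the table and column names below are invented purely for illustration), a staging-area query could convert both conventions into a single DATE value before the load. The syntax shown is Oracle&#39;s TO_DATE; other databases and ETL tools provide equivalent conversion functions.&lt;br /&gt;
&lt;div&gt;
&lt;span style=&quot;font-family: Courier New;&quot;&gt;-- rows arriving in US month/day order, e.g. &#39;03/11/07&#39;&lt;br /&gt; SELECT TO_DATE(src_date_text, &#39;MM/DD/RR&#39;) AS order_date FROM stg_orders_us;&lt;br /&gt; &lt;br /&gt; -- rows arriving in day/month order, e.g. &#39;11/03/07&#39;&lt;br /&gt; SELECT TO_DATE(src_date_text, &#39;DD/MM/RR&#39;) AS order_date FROM stg_orders_uk;&lt;/span&gt;&lt;/div&gt;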
&lt;br /&gt;
&lt;span style=&quot;font-size: medium;&quot;&gt;Why Extract, Transform and Load (ETL)?&lt;/span&gt;&lt;br /&gt;
Extract, Transform and Load (ETL) refers to a category of tools that 
can assist in ensuring that data is cleansed, i.e. conforms to a 
standard, before being entered into the data warehouse. Vendor-supplied ETL tools are considerably easier to use for managing data cleansing on an ongoing basis. ETL sits in front of the data warehouse, 
listening for incoming data. If it comes across data that it has been 
programmed to &lt;i&gt;&lt;b&gt;transform&lt;/b&gt;&lt;/i&gt;, it will make the change before &lt;i&gt;&lt;b&gt;loading&lt;/b&gt;&lt;/i&gt; the data into the data warehouse.&lt;br /&gt;
ETL tools can also be utilized to &lt;b&gt;&lt;i&gt;extract&lt;/i&gt;&lt;/b&gt; 
data from remote databases either through automatically scheduled events
 or via manual intervention. There are alternatives to purchasing ETL 
tools and that will depend on the complexity and budget for your 
project. Database Administrators (DBAs) can write scripts to perform ETL
 functionality which can usually suffice for smaller projects. 
Microsoft&#39;s SQL Server comes with a free ETL tool called Data Transformation Services (DTS). DTS is pretty good for a free tool, but it does have limitations, especially in the ongoing administration of data cleansing.&lt;br /&gt;
Examples of ETL vendors are Data Mirror, Oracle, IBM, Cognos and SAS.
 As with all product selections, list what you think you would require 
from an ETL tool before approaching a vendor. It may be worthwhile to 
obtain the services of consultants that can assist with the requirements
 analysis for product selection.&lt;br /&gt;
&lt;br /&gt;
&lt;div align=&quot;center&quot;&gt;
&lt;img alt=&quot;&quot; border=&quot;0&quot; height=&quot;310&quot; src=&quot;http://www.dwreview.com/Articles/Images/rbqu_article3.GIF&quot; width=&quot;240&quot; /&gt;&lt;/div&gt;
&lt;div align=&quot;center&quot;&gt;
Figure 1. ETL sits in front of Data Warehouses&lt;/div&gt;
&lt;span style=&quot;font-size: medium;&quot;&gt;How important is Data Cleansing and ETL to the success of Data Warehousing Projects?&lt;/span&gt;&lt;br /&gt;
ETL is often out-of-sight and out-of-mind if the data warehouse is 
producing results that match stakeholders&#39; expectations. As a result, ETL has been dubbed the silent killer of data warehousing projects. 
Most data warehousing projects experience delays and budget overruns due
 to unforeseen circumstances relating to data cleansing.&lt;br /&gt;
&lt;span style=&quot;font-size: medium;&quot;&gt;How to Plan for Data Cleansing?&lt;/span&gt;&lt;br /&gt;
It is important to start mapping out the data that will be entered into the data warehouse as early as possible. This may change as the project matures, but the documentation trail will prove extremely valuable, as you will need to obtain commitments from data owners that 
they will not change data formats without prior notice.&lt;br /&gt;
Create a list of data that will require Extracting, Transforming and 
Loading. Create a separate list for data that has a higher likelihood of
 changing formats. Decide on whether you need to purchase ETL tools and 
set aside an overall budget. Obtain advice from experts in the field and
 evaluate if the product fits into the overall technical hierarchy of 
your organization.&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/6530227829947756822/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/data-cleansing-for-data-warehousing.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/6530227829947756822'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/6530227829947756822'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/data-cleansing-for-data-warehousing.html' title='Data Cleansing for Data Warehousing'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-7850276521275220282</id><published>2012-10-07T12:34:00.000+05:30</published><updated>2012-10-07T12:34:42.210+05:30</updated><title type='text'>The Evolving Role of the Enterprise Data Warehouse in the Era of Big Data Analytics</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
The enterprise data warehouse (EDW) community has entered a new realm
 of meeting new and growing business requirements in the era of big 
data. Common challenges include:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;extreme integration&lt;/li&gt;
&lt;li&gt;semi- and un-structured data sources&lt;/li&gt;
&lt;li&gt;petabytes of behavioral and image data accessed through MapReduce/Hadoop&lt;/li&gt;
&lt;li&gt;massively parallel relational database&lt;/li&gt;
&lt;li&gt;structural considerations&amp;nbsp;for the EDW to support predictive and other advanced analytics.&lt;/li&gt;
&lt;/ul&gt;
&amp;nbsp;These pressing needs raise more than a few urgent questions, such as:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;How do you handle the explosion and diversity of data sources from conventional and non-conventional sources?&lt;/li&gt;
&lt;li&gt;What new and existing technologies are needed to deepen the understanding of business through big data analytics?&lt;/li&gt;
&lt;li&gt;What technological requirements are needed to deploy big data projects?&lt;/li&gt;
&lt;li&gt;What potential organizational and cultural impacts should be considered?&lt;/li&gt;
&lt;/ul&gt;
This white paper provides detailed guidance for designing and 
administering the necessary deployment processes to meet these 
requirements. Ralph Kimball addresses the industry&#39;s lack of specific guidance as to how the EDW needs to respond to the big data analytics challenge, and what design elements are needed to
 support these new requirements.&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/7850276521275220282/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/the-evolving-role-of-enterprise-data.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/7850276521275220282'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/7850276521275220282'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/the-evolving-role-of-enterprise-data.html' title='The Evolving Role of the Enterprise Data Warehouse in the Era of Big Data Analytics'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-9105377340587935904</id><published>2012-10-07T12:32:00.000+05:30</published><updated>2012-10-07T12:32:27.191+05:30</updated><category scheme="http://www.blogger.com/atom/ns#" term="data"/><category scheme="http://www.blogger.com/atom/ns#" term="data mart"/><category scheme="http://www.blogger.com/atom/ns#" term="Teradata"/><title type='text'>Make room for data</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Given enough time, most organizations will reach a point when they 
wonder how their Teradata systems can possibly take in any more data. 
Buying new hardware can solve this problem—but that may be unnecessary.&lt;br /&gt;

One company found a way to more readily accommodate its growing volumes of data. &lt;em&gt;Teradata Magazine&lt;/em&gt;
 spoke with Dietmar Trummer, senior IT architect at mobilkom austria, 
about its use of Teradata multi-value compression. Trummer explained how
 he was able to free space, increase performance and save money, all 
without reducing service.&lt;br /&gt;
&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; What prompted you to investigate using Teradata’s multi-value compression?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; Starting at the end of 2006, we anticipated performance issues in the near future due to a shortage of free disk space. Our users had data analysis results and reports to deliver on time, so 
in my role as a Teradata system user and developer of smaller data 
marts, I tried to figure out how to cope with this disk space 
issue.&lt;br /&gt;
&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; What approaches did you consider?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; Management and administrators at 
mobilkom austria considered several options: buy new hardware, archive 
data to external storage, or remove indexes. We narrowed that down to these 
choices:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Reduce redundant data by deleting specialized data marts.&lt;/strong&gt; This required developing more complex queries to rebuild the logic of the data marts.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Aggregate data and skip the details, or reduce history by removing old data.&lt;/strong&gt; We would lose information and would have to reduce our internal service portfolio.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Optimize the table definitions based on Teradata technology.&lt;/strong&gt; This option offers the use of size-optimal data types, primary indexes with optimal table distribution and multi-value compression with optimal size impact.&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; Why did you select Teradata’s multi-value compression approach?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; First of all, we didn’t want to develop more complex queries. Second, we didn’t want to reduce&lt;br /&gt;our service portfolio.&lt;br /&gt;

I was looking for a solution that had the least possible influence on
 the daily work of the users and developers. Optimizing table 
definitions appeared to be worth investigating.&lt;br /&gt;

To be honest, the multi-value compression approach was the most 
interesting. It promised a high potential and also delivered a technical
 and mathematical challenge. Besides, I had some prior experience 
implementing the compression approach.&lt;br /&gt;

Two years ago during the development of a simple data mart, I had to 
store a large amount of intermediate data in a table. The space in my 
staging database was insufficient, so I needed to find a way to reduce 
the table space.&lt;br /&gt;

However, with multi-value compression I could fit my table into the 
staging database in a short time, without the help of an administrator.&lt;br /&gt;

One year later, I adapted what I learned from this experience and 
developed a method to ease our new storage problems using multi-value 
compression.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; Is this code for using compression as simple as it looks?&lt;/h3&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre&gt;CREATE MULTISET TABLE dwh_ua.uaf_contract (
  phone_id        INTEGER NOT NULL,
  call_mode_code  CHAR(1) COMPRESS (&#39;A&#39;,&#39;P&#39;,&#39;S&#39;),
  source_table_id SMALLINT NOT NULL COMPRESS (1,4,10),
  charge_usage    DECIMAL(18,4) COMPRESS (0.0000),
  ...
) PRIMARY INDEX (phone_id)&lt;/pre&gt;
&lt;/div&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt;
 Yes, it is. We optimized our biggest table using multi-value 
compression: 1TB without compression; 480GB with manual (not optimized) 
compression; 370GB with optimized compression. The sample code is 
actually a fraction of the table definition.&lt;br /&gt;

That’s the simple part—the challenging part is how to get the values for optimized compression.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; What about the 
importance of data analysis and finding the break-even point of 
compression? How does multi-value compression work, and how do you find 
that break-even point?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; Unlike other databases, the Teradata 
Database compresses specific user-defined values to zero space. That 
sounds like magic but, of course, it isn’t. The idea is to reduce the 
row size of many rows by a large amount, and to enlarge the row size of 
all rows by a small amount.&lt;br /&gt;

&lt;div class=&quot;inline-image-left&quot; style=&quot;width: 280px;&quot;&gt;
&lt;img alt=&quot;image&quot; height=&quot;193&quot; src=&quot;http://www.teradatamagazine.com/tdmo_assets/tdmo_images/make_room_table1_tn.jpg&quot; width=&quot;280&quot; /&gt;&lt;div class=&quot;caption&quot;&gt;
Click to enlarge&lt;/div&gt;
&lt;/div&gt;
The code that follows and the corresponding row storage table 
[see table 1] illustrate how this works: We observed the compression of a
 single column. All rows with a call_mode_code value contained in the 
compress list (‘A,’ ‘P’ or ‘S’) skipped the storage for this column in 
the row data. The compressed values are stored as binary code, and the 
column was reduced to zero space. Consequently, the rows got smaller. In
 table 1, the binary code “00” indicates that the value is stored in the
 row data.&lt;br /&gt;

&lt;div class=&quot;code&quot;&gt;
&lt;pre&gt;... call_mode_code CHAR(1) COMPRESS (&#39;A&#39;,&#39;P&#39;,&#39;S&#39;), ...
2 bits: &#39;A&#39; = 01, &#39;P&#39; = 10, &#39;S&#39; = 11&lt;/pre&gt;
&lt;/div&gt;
The
 binary code is also used in the presence bits to indicate if the values
 are not compressed. If the table contains no call_mode_code value of 
‘A,’ ‘P’ or ‘S,’ small amounts of storage are added in the presence bits
 and the rows get slightly larger. This explains why it is important to 
know the values contained in your table.&lt;br /&gt;

In summary, if you use compression, all rows—regardless of whether 
the column value is compressed—have to add the presence bits to their 
row storage.&lt;br /&gt;
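
To make the trade-off concrete with a rough, back-of-the-envelope figure (ignoring row-level byte alignment): call_mode_code is a CHAR(1) column, so every stored value costs 8 bits, while a compress list of three values costs 2 presence bits in every row. Compression therefore pays off only if the listed values cover more than roughly 2/8 = 25 percent of the rows; below that, the presence bits cost more than the bytes they save.&lt;br /&gt;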

Then, the question arose: How can we use this information to find the optimal compression list for a table’s column?&lt;br /&gt;

The answer is detailed mathematically, but the principle is not very 
complex: The values that occur more frequently are more likely to be 
added to the compression list. Therefore, we had to fill the compression
 list with the most frequently occurring values. The break-even point is
 reached when the addition of a new value to the list will not result in
 a further decrease of the column’s total space consumption.&lt;br /&gt;

But be aware that this equation is based only on the static point of 
view. Data changes over time, so analysis about volatility of data is 
also necessary and has to be taken into consideration when finding the 
best compression list.&lt;br /&gt;
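
A minimal sketch of that frequency analysis, reusing the table and column from the earlier example (the query is illustrative only, not the assessment tool discussed below), might look like this:&lt;br /&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre&gt;-- count how often each value occurs; the most frequent values are compression candidates
SELECT call_mode_code, COUNT(*) AS row_count
FROM   dwh_ua.uaf_contract
GROUP  BY call_mode_code
ORDER  BY 2 DESC;&lt;/pre&gt;
&lt;/div&gt;
Values are then added to the compress list from the top of this result downward until the break-even point described above is reached.&lt;br /&gt;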

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; What kinds of data gain the best compression by setting “obvious” compression values? Are there problems using this technique?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; That’s difficult to generalize because we 
experienced different and unexpected kinds of data with great 
compression performance. Of course columns with large data types have a 
better compression ratio than those with small data types. Also, columns
 that contain a few very frequent values gain good compression. These 
values might be words in natural language, flags, categories, status and
 years.&lt;br /&gt;

But data columns that contain measures can also be a good source for 
compression—especially default values, zeros and values that are near 
the most frequent value of a Gaussian or Poisson distributed column.&lt;br /&gt;

The problem with what might be considered “obvious” compression 
values is that it is difficult to find the break-even point—i.e., the 
optimal compression list—without data analysis. We frequently 
experienced that manual compression with no data analysis often leads to
 compression lists that are too large. In these cases, too many values 
are used for compression, which can lead to “over-compression.” Less 
would have been better.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; You described actual 
results when using multi-value compression. How much table scan 
performance improvement did you see? Do you have examples to share?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; We didn’t analyze the table scan 
performance in a way that would enable me to present a percentage of 
performance improvement. Our result is based on the theoretical fact 
that the table scan performance is determined by the table size—and by 
our users’ experiences.&lt;br /&gt;

&lt;div class=&quot;inline-image-left&quot; style=&quot;width: 280px;&quot;&gt;
&lt;img alt=&quot;image&quot; height=&quot;147&quot; src=&quot;http://www.teradatamagazine.com/tdmo_assets/tdmo_images/make_room_table2_tn.jpg&quot; width=&quot;280&quot; /&gt;&lt;div class=&quot;caption&quot;&gt;
Click to enlarge&lt;/div&gt;
&lt;/div&gt;
Nevertheless, we conducted experiments to check a potential 
negative performance impact caused by “decoding” the compressed values 
during querying. The results showed that, on the one hand, we could not 
find a negative performance impact; on the other hand, the table scan 
performance improvement is directly proportional to the space reduction.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; How large of a saving in data size did you see in your results?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; The bar chart demonstrates a graphical 
representation of different compression scenarios of a single column. 
[See figure.] The horizontal axis breaks down the number of presence 
bits that must be used to code the compression values, and the vertical 
axis shows the size of the column in megabytes.&lt;br /&gt;

The red portion of each bar displays how much space will remain after
 the compression, and the green portion indicates how much space would 
be freed. Combined, the size of this column without compression is about
 760MB.&lt;br /&gt;

&lt;div class=&quot;inline-image-left&quot; style=&quot;width: 280px;&quot;&gt;
&lt;img alt=&quot;image&quot; height=&quot;215&quot; src=&quot;http://www.teradatamagazine.com/tdmo_assets/tdmo_images/make_room_figure_tn.jpg&quot; width=&quot;280&quot; /&gt;&lt;div class=&quot;caption&quot;&gt;
Click to enlarge&lt;/div&gt;
&lt;/div&gt;
The bars, from left to right, indicate how much space would be freed when compressing:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;First column = 1 compression bit: the most frequent value&lt;/li&gt;
&lt;li&gt;Second column = 2 compression bits: the most and second-most frequent values&lt;/li&gt;
&lt;li&gt;Third column = 2 compression bits: the most, second-most and third-most frequent values&lt;/li&gt;
&lt;li&gt;Thirteenth column (the rightmost bar in the chart) = 4 compression bits: in this case all occurring values&lt;/li&gt;
&lt;/ul&gt;
Notice that the first compression scenario is optimal—the size of this column would be reduced to about 50MB.&lt;br /&gt;

Table 2 displays the data analysis results in textual form. The 
recommendation of the tool is to compress one value (= 1 compression 
bit), which then generates the corresponding compress clause.&lt;br /&gt;

This table is a real example from our biggest table. You can see in 
row 7 of the table that multi-value compression was used. In this case 
the developer manually set a large compress list, which resulted in a 
column size of about 290MB as shown in cell C7. This is one-third the 
size without compression, indicated in cell C6, but nearly six times the
 size of the optimally compressed column that appears in cell C8.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; Beyond the space savings, what other benefits and cost savings&lt;br /&gt;did you experience?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; The cost savings stemmed directly from the
 fact that we didn’t need to buy new hardware or perform other actions 
that would result in indirect costs, such as reducing our service 
portfolio.&lt;br /&gt;

An indirect benefit was that the overall performance of our system 
was stabilized because we could raise the level of free storage to the 
recommended percentage. Multi-value compression had its part in this. It
 also played a part in other actions, like archiving.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; Have there been any 
impacts on your end users or application developers since you 
implemented compression? Have they had to change anything?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; The end users who get our reports and 
analysis results didn’t realize that anything had changed, except that 
we were able to consistently meet their service level agreements because
 of our stabilized system performance.&lt;br /&gt;

What is interesting is the impact this procedure has on our 
 developers, like me. The method was used with the intention of freeing some
 space in a one-time action. The administrators would analyze our 
biggest tables and change their table definitions to optimize 
compression.&lt;br /&gt;

Today all application developers in our unit know how to use it. 
During development of large to medium-sized data marts, a procedure is 
used to optimize bigger tables; therefore, the tables go into production
 with optimized compression. What is important is that the developers 
know about the pitfalls, such as data volatility, which can turn a 
perfect compression scheme into a poor one over time.&lt;br /&gt;

Also, our administrators didn’t confine themselves to optimizing just
 the biggest tables. They optimized medium-sized and even small tables. 
This is why we have approximately 3,000 optimized tables!&lt;br /&gt;
&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; Has compression added work to maintenance efforts?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; Yes, it has. We have been using optimized compression since June 2007, and most tables have been optimized as of January 2008.&lt;br /&gt;
We know that we have to check and re-adjust our compression lists 
because of data volatility. Therefore, the developers, as well as the 
administrators, re-analyze the biggest tables in the form of control 
samples. But we have not gained enough experience to estimate how much 
effort we’ll have to invest in compression maintenance.&lt;br /&gt;

To reduce maintenance efforts, we try to avoid compression on highly 
volatile data columns, or we use more robust and less optimal 
compression lists for those columns.&lt;br /&gt;

&lt;h3 class=&quot;question&quot;&gt;
&lt;span class=&quot;QA&quot;&gt;Q:&lt;/span&gt; How does a compression assessment tool work, and is it available to others?&lt;/h3&gt;
&lt;span class=&quot;QA&quot;&gt;A:&lt;/span&gt; I developed an Excel macro that does data 
analysis on selected columns of a specified table. It visualizes its 
results in Excel tables and charts and produces recommendations for the 
compress clause of the analyzed columns. Tables 1 and 2 show some output
 of the tool.&lt;br /&gt;

&lt;span&gt;I’ve received about 50 e-mails from Teradata users asking about
 the tool. So far, mobilkom has granted me permission to give the macro 
tool to them free for personal use. Any future requests would have to be
 negotiated. Anyone who is interested should e-mail TDCompress@mobilkom.at. Of course, since I am not a software producer, I cannot offer warranties or provide any service for this tool.&lt;/span&gt;&lt;br /&gt;

I want to mention that there are professional tools on the market 
that deal with Teradata multi-value compression, its optimization and 
similar issues. Atanasoft is one vendor of such a tool.&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/9105377340587935904/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/make-room-for-data.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/9105377340587935904'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/9105377340587935904'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/make-room-for-data.html' title='Make room for data'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-2654174067785512480</id><published>2012-10-07T12:27:00.000+05:30</published><updated>2012-10-07T12:27:36.550+05:30</updated><category scheme="http://www.blogger.com/atom/ns#" term="cube"/><category scheme="http://www.blogger.com/atom/ns#" term="data warehousing"/><category scheme="http://www.blogger.com/atom/ns#" term="DB"/><category scheme="http://www.blogger.com/atom/ns#" term="development"/><category scheme="http://www.blogger.com/atom/ns#" term="ETL"/><category scheme="http://www.blogger.com/atom/ns#" term="Fast Clone"/><category scheme="http://www.blogger.com/atom/ns#" term="Oracle"/><category scheme="http://www.blogger.com/atom/ns#" term="ROLLUP"/><category scheme="http://www.blogger.com/atom/ns#" term="Security"/><category scheme="http://www.blogger.com/atom/ns#" term="sets"/><category scheme="http://www.blogger.com/atom/ns#" term="SQL"/><title type='text'>Cube Development for Beginners</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
From school, we know that although most math tasks have plain-English formulation, we still have to state an equation with &lt;em&gt;x&lt;/em&gt;, or, sometimes, a system of equations with &lt;em&gt;x&lt;/em&gt;, &lt;em&gt;y&lt;/em&gt;,
 and maybe even more variables, to find a solution. Similarly, in a 
decision support system, we have to design a set of data objects such as
 dimensions and cubes, according to the business questions formulated in
 plain English, so that we can get those questions answered.&lt;br /&gt;
 This
 article focuses on just that: constructing a dimensional environment 
for answering business questions. In particular, it will explain how to 
come from certain analytic questions to the set of data objects needed 
to get the answers, using Oracle Warehouse Builder
 as the development tool. Since things are best understood by example, 
the article will walk you through building a simple data warehouse.&lt;br /&gt;
 &lt;h3&gt;
&lt;strong&gt;From Questions to Answers &lt;/strong&gt;&lt;/h3&gt;
A
 typical data warehouse  concentrates on sales, to help users find 
answers to questions regarding the state of the business using the 
results retrieved from a sales cube based on Time, Product, or Customer 
criteria. This article’s example deviates from that practice, however. 
Here, you’ll look at an example of how you might analyze the outgoing 
traffic related to a certain Website, using the information retrieved 
from a traffic cube based on Geography, Resource, and Time criteria.&lt;br /&gt;
 Suppose
 you have a Website whose resources are hosted on more than one server, 
each of which may differ from another in the way it stores traffic 
statistics. The storage types used vary from a relational database to 
flat files. You need to consolidate traffic statistics over all of the 
servers so that you can analyze users’ activity by resource accessed, 
date and time, and geographical location, answering 
questions such as the following:&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;What      are our five most attractive resources on the site?&lt;/li&gt;
&lt;li&gt;Users      from what country loaded this resource most of all over the course of the      previous year?&lt;/li&gt;
&lt;li&gt;Connections      from what region generated most outgoing traffic on the site for the last      three months?&lt;/li&gt;
&lt;/ul&gt;
At
 first glance, it looks like you can use SQL alone to get these  
questions answered. After all, the CUBE, ROLLUP, and GROUPING SETS 
extensions to SQL are specifically designed to perform aggregation over 
multiple dimensions of data. Remember, however, that some data here is 
stored in flat files, which makes the SQL-based approach impractical. 
Moreover, looking at the above questions, you may notice that some 
require one year of historical data to get answered. What this means, in
 practice, is that you’ll also need to access archives containing 
historical data derived from transaction data and implemented as 
separate sources. In SQL, managing all those sources and transforming 
the data in each one into a consistent format for unified query 
operations would be quite laborious and error-prone. &lt;br /&gt; &lt;br /&gt; So, briefly, the main tasks here are:&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;Consolidate      data stored in disparate sources into a consistent format.&lt;/li&gt;
&lt;li&gt;Work      with historical data derived from transaction data.&lt;/li&gt;
&lt;li&gt;Use      preloaded data to speed up queries.&lt;/li&gt;
&lt;li&gt;Organize      data in a way convenient for dimensional analysis.&lt;/li&gt;
&lt;/ul&gt;
A
 data warehouse is designed precisely to  perform these tasks. Just to 
recap, a warehouse is a relational database tuned to handle analytic 
queries rather than transaction processing, and is kept up to date with 
periodical refreshes and updates, downloading a subset of data from its 
sources by the ETL (extraction, transformation, and loading) process 
(scheduled normally for a particular day of the week or at a 
predetermined time of the day or night). The extracted data is transformed 
into a consistent format and loaded into the warehouse target objects. 
Once populated, the warehouse is typically available for queries through
 dimensional objects such as cubes and dimensions. Schematically, this 
might look like the figure below:&lt;br /&gt;
 &lt;div align=&quot;center&quot;&gt;
&lt;strong&gt;&lt;img alt=&quot;cube-development-f1&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/359589.gif&quot; /&gt;&lt;br /&gt; Figure 1 &lt;/strong&gt;Gathering data from disparate sources and transforming it into useful information available to business users.&lt;/div&gt;
In
 particular, for this example, a simple warehouse consisting of a cube 
with several dimensions is an appropriate solution. Since the traffic is
 the subject matter here, you might want to define outgoing traffic as 
the cube measure. For simplicity, in this example we will measure 
outgoing traffic based on the size of the resource being accessed. For 
example, if someone downloads a 1MB file from your site, then it’s 
assumed that 1MB of outgoing traffic will be generated. It’s similar in 
meaning to how the dollar amount of a purchase depends on the price of 
the product chosen. While dollar amount is normally a measure of a sales
 cube, price is a characteristic of product that is usually used as a 
dimension in that cube. A similar situation is here, while outgoing 
traffic is our traffic cube’s measure, resource size is a characteristic
 of resource that will be used as a dimension.&lt;br /&gt;
 Moving on to 
dimensions, the set to be used in the cube in this example can 
be determined by examining the list of questions you have to answer. So,
 looking over the questions listed at the beginning of this section, you
 might want to use the following dimensions to organize the data in the 
cube:&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;&lt;em&gt;Geography&lt;/em&gt;,      which organizes the data related to the geographic locations the site users      come from&lt;/li&gt;
&lt;li&gt;&lt;em&gt;Resource&lt;/em&gt;,      which categorizes the data related to the site resources&lt;/li&gt;
&lt;li&gt;&lt;em&gt;Time&lt;/em&gt;,      which is used to aggregate traffic data across time&lt;/li&gt;
&lt;/ul&gt;
Each
 traffic record will have specific values for each geography location, 
for each resource, and for each day and time. To clarify, the time value
 in a traffic record refers to the time the resource is accessed.&lt;br /&gt;
 The
 next essential step is to define the levels of aggregation of data for 
each dimension, organizing those levels into hierarchies. As for the 
Geography dimension, you might define the following hierarchy of levels 
(with the highest level listed first):&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;Region&lt;/li&gt;
&lt;li&gt;Country&lt;/li&gt;
&lt;/ul&gt;
The hierarchy of levels for the Resource dimension might look like this:&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;Group&lt;/li&gt;
&lt;li&gt;Resource&lt;/li&gt;
&lt;/ul&gt;
The time dimension might contain the following hierarchy:&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;Year&lt;/li&gt;
&lt;li&gt;Month&lt;/li&gt;
&lt;li&gt;Day&lt;/li&gt;
&lt;/ul&gt;
In
 more complex and realistic scenarios, a dimension may contain more than
 one hierarchy—for example, fiscal versus calendar year. In this 
particular example, however, each dimension will have only a single 
hierarchy.&lt;br /&gt;
 &lt;h3&gt;
&lt;strong&gt;Implementing a Data Warehouse with Oracle Warehouse Builder &lt;/strong&gt;&lt;/h3&gt;
Now
 that you’ve decided what objects you need to have in the warehouse, you
 can design and build them. This task can be accomplished with Oracle 
Warehouse Builder, which is part of the standard installation of Oracle 
Database, starting with Oracle Database 11&lt;em&gt;g&lt;/em&gt; Release 1. To enable it, though, some preliminary steps are required:&lt;br /&gt;
 First of all, you need to unlock the database schemas used by Oracle Warehouse Builder. In Oracle Warehouse Builder 11&lt;em&gt;g&lt;/em&gt; Release 1, the OWBSYS schema is used; in 11&lt;em&gt;g &lt;/em&gt;Release
 2, both OWBSYS and OWBSYS_AUDIT are used. These schemas hold the OWB 
design and runtime metadata. This can be done with the following 
commands, connecting to SQL*Plus as SYS or SYSDBA:&lt;br /&gt;
 &lt;pre&gt;ALTER USER OWBSYS IDENTIFIED BY &lt;em&gt;owbsyspwd&lt;/em&gt; ACCOUNT UNLOCK;
ALTER USER OWBSYS_AUDIT IDENTIFIED BY &lt;em&gt;owbsys_auditpwd&lt;/em&gt; ACCOUNT UNLOCK;
&lt;/pre&gt;
&lt;br /&gt; Next, you must create a Warehouse Builder workspace. A 
workspace contains the objects for one or more data warehousing 
projects; in complex environments, you may have several workspaces. 
(Instructions for creating a workspace are in the &lt;em&gt;Oracle Warehouse Builder Installation and Administration Guide for Windows and Linux.&lt;/em&gt; Follow the instructions to create a new workspace with a new user as workspace owner.)&lt;br /&gt;
 Now
 you can launch the Warehouse Builder Design Center, which is the 
primary graphical user interface of Oracle Warehouse Builder. &amp;nbsp;Click &lt;strong&gt;Show Details&lt;/strong&gt;
 and connect to the Design Center as the newly created workspace user, 
with the required host/port/service name or Net Service name.&lt;br /&gt;
 Before
 going any further, though, let’s outline the set of tasks to 
accomplish. Broadly described, the tasks to be done in this example are:&lt;br /&gt;
 &lt;ul type=&quot;square&quot;&gt;
&lt;li&gt;Define a data warehouse to      house the dimensional objects described earlier&lt;/li&gt;
&lt;li&gt;Consolidate data from various      data sources&lt;/li&gt;
&lt;li&gt;Implement the dimensional      objects: dimensions and cube&lt;/li&gt;
&lt;li&gt;Load data extracted from the sources      into the dimensional objects&lt;/li&gt;
&lt;/ul&gt;
The
 following sections describe how to accomplish the above tasks, 
implementing the dimensional solution discussed here. Before you proceed
 to it, though, you have to decide what implementation model will be 
used. In fact, you have two options: a relational target warehouse, 
which stores the actual data in relational tables, or a multidimensional
 warehouse. In the latter case, dimensional data is stored in an Oracle 
 OLAP analytic workspace. This feature is available in Oracle Database 10&lt;em&gt;g&lt;/em&gt; and Oracle Database 11&lt;em&gt;g&lt;/em&gt;. For this example, the dimensional model will be implemented as a relational target warehouse.&lt;br /&gt;
 &lt;h3&gt;
&lt;strong&gt;Defining a Target Schema&lt;/strong&gt;&lt;/h3&gt;
In
 this initial step, you begin by creating a new project or configuring 
the default one in the OWB Design Center. Then, you might identify the 
target schema that will be used to contain the target data objects: the 
dimensions and cube described earlier in this article.&lt;br /&gt;
 Assuming 
you&#39;ve decided to use the default project MY_PROJECT, let’s move on to 
creating the target schema. The steps below describe the process of 
creating the target schema and then a target module upon that schema in 
the Design Center:&lt;br /&gt;
 &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Globals Navigator,      right-click the &lt;strong&gt;Security-&amp;gt;Users &lt;/strong&gt;node and select &lt;strong&gt;New User&lt;/strong&gt; in the popup      menu to launch the Create User wizard.&lt;/li&gt;
&lt;li&gt;On the Select DB user to      register screen, click  &lt;strong&gt;Create DB User…&lt;/strong&gt; to open the Create Database      User dialog.&lt;/li&gt;
&lt;li&gt;In
 the Create Database User      dialog, enter the system user password 
and then specify the user name,      say, owbtarget and the password for
 a new database user. Then, click &lt;strong&gt;OK&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;You’ve
 now returned to the      Select DB user to register screen, where the 
newly created owbtarget user      should show up in the Selected Users 
pane. Click &lt;strong&gt;Next&lt;/strong&gt; to continue.&lt;/li&gt;
&lt;li&gt;On the Check to create a      location screen, make certain that the &lt;strong&gt;To Create a location&lt;/strong&gt; checkbox is      checked for the &lt;strong&gt;owbtarget&lt;/strong&gt; user, and then click &lt;strong&gt;Next&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On the Summary screen, click      &lt;strong&gt;Finish&lt;/strong&gt; to complete the process.&lt;/li&gt;
&lt;/ol&gt;
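For reference, the wizard’s Create Database User step corresponds roughly to creating the schema yourself from SQL*Plus; the password and grants below are illustrative assumptions rather than exactly what Warehouse Builder issues:&lt;br /&gt;
 &lt;pre&gt;-- illustrative sketch only; the password and grants are assumptions
CREATE USER owbtarget IDENTIFIED BY owbtargetpwd;
GRANT CONNECT, RESOURCE, UNLIMITED TABLESPACE TO owbtarget;&lt;/pre&gt;
&lt;br /&gt;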
As
 a result of the above steps, the owbtarget schema is created in the 
database. (Also, the owbtarget user should show up under the 
Security-&amp;gt;Users node in the Globals Navigator.) The next step is to 
create a target module upon the newly created database schema. As a 
quick recap, you use modules in Warehouse Builder to organize the 
objects you’re dealing with into subject-oriented groups. So, the 
following steps describe how you might build an Oracle module upon the 
owbtarget database schema:&lt;br /&gt;
 &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases&lt;/strong&gt; node and right-click the &lt;strong&gt;Oracle&lt;/strong&gt; node.&lt;/li&gt;
&lt;li&gt;In the popup menu, select &lt;strong&gt;New      Oracle Module &lt;/strong&gt;to launch the wizard.&lt;/li&gt;
&lt;li&gt;On
 the Name and Description      screen of the wizard, specify a name for 
the module being created, say,      target_mdl. As for the module status,
 you can leave it as Development.&lt;/li&gt;
&lt;li&gt;On the Connection Information  
    screen, first make sure that the selected location is the one 
associated      with the target_mdl module being created (it may appear 
under the      TARGET_MDL_LOCATION1 name). Then, you need to provide the
 connection      information for this location. So, click the &lt;strong&gt;Edit…&lt;/strong&gt; button and provide the      details of the Oracle database location, specifying &lt;strong&gt;owbtarget&lt;/strong&gt;
 as the User      Name. After you’re done with it, you might want to 
make sure that      everything is correct and test the connection by 
clicking the &lt;strong&gt;Test      Connection&lt;/strong&gt; button. Close all of the dialogs opened by clicking &lt;strong&gt;OK&lt;/strong&gt; to      return to the Connection Information screen. Click &lt;strong&gt;Next&lt;/strong&gt; to continue.&lt;/li&gt;
&lt;li&gt;On the Summary screen, click      &lt;strong&gt;Finish&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;In the Design Center, select      &lt;strong&gt;File-&amp;gt;Save All &lt;/strong&gt;to save the module you just created.&lt;/li&gt;
&lt;/ol&gt;
As
 a result of the above steps, the TARGET_MDL module should appear under 
the MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle node in the Projects Navigator. 
If you expand the module node, you’ll see what types of objects you can 
create under it. Among others, it includes nodes for holding: cubes, 
dimensions, tables, and external tables.&lt;br /&gt;
 &lt;h3&gt;
&lt;strong&gt;Consolidating data from disparate data sources &lt;/strong&gt;&lt;/h3&gt;
Here,
 you will need not only to extract data from disparate sources but also 
transform the extracted data in a way so that it can be consolidated 
into a single data source. Thus, this task usually has the following 
stages:&lt;br /&gt;
 &lt;ol&gt;
&lt;li&gt;Import the metadata into Oracle Warehouse Builder.&lt;/li&gt;
&lt;li&gt;Design ETL operations.&lt;/li&gt;
&lt;li&gt;Load source data into the      warehouse.&lt;/li&gt;
&lt;/ol&gt;
What
 you need to start with, however, is to develop a general strategy for 
extracting the source data, transforming it, and loading it into the 
warehouse. In other words, you first have to make strategic decisions about 
how best to implement the task of consolidating data from the data sources.&lt;br /&gt;
 As
 far as flat files are concerned, your first decision is probably 
how you’re going to move data from them into the 
warehouse. The available options are loading with SQL*Loader or 
reading the files through external tables. In this particular example, using external 
tables seems to be a preferable option because the data being extracted 
from the flat files has to be joined with relational data. If you 
recall, the example assumes the source data is to be extracted from both
 database tables and flat files.&lt;br /&gt;
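 To give a feel for the end result, the external table that Warehouse Builder will eventually deploy for access.csv is roughly of the following shape (a hand-written sketch; the directory name is an illustrative assumption, and the actual DDL is generated for you in the steps that follow):&lt;br /&gt;
 &lt;pre&gt;-- rough sketch only; SRC_FILES_DIR is an assumed directory object
-- note: a date mask for DATETIME will still be needed, as discussed later in this article
CREATE TABLE access_csv_ext (
  userip       VARCHAR2(15),
  datetime     DATE,
  siteresource VARCHAR2(200)
)
ORGANIZATION EXTERNAL (
  TYPE ORACLE_LOADER
  DEFAULT DIRECTORY src_files_dir
  ACCESS PARAMETERS (
    RECORDS DELIMITED BY NEWLINE
    FIELDS TERMINATED BY &#39;,&#39;
  )
  LOCATION (&#39;access.csv&#39;)
);&lt;/pre&gt;
&lt;br /&gt;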
 Your next decision is
 whether to define a source module for the source data objects you’re
 going to use. Although it’s generally considered good practice to 
keep source and target objects in separate modules, for this simple 
example we will create all objects in a single database module.&lt;br /&gt;
 Now let’s take a closer look at the data sources that will be accessed.&lt;br /&gt;
 As
 mentioned earlier, what we have here is a Website whose resources are 
hosted on more than one server, each of which differs from another in 
the way it stores traffic statistics. For example, one server stores it 
in flat files and another in the database. The content of a flat file 
containing real-time data and called, say, access.csv might look like 
this:&lt;br /&gt;
 &lt;pre&gt;User IP,Date Time,Site Resource
67.212.160.0,5-Jan-2011 20:04:00,/rdbms/demo/demo.zip 
85.172.23.0,8-Jan-2011 12:54:28,/articles/vasiliev_owb.html
80.247.139.0,10-Jan-2011 19:43:31,/tutorials/owb_oracle11gr2.html &lt;/pre&gt;
&lt;br /&gt;
 As you can see, the above file contains information about accessing 
resources by users, storing it in comma-separated value (CSV) format.&lt;em&gt;&lt;strong&gt; &lt;/strong&gt;&lt;/em&gt;In
 turn, the server that uses the database in place of flat files might 
store this same information in an accesslog table with the following 
structure:&lt;br /&gt;
 &lt;pre&gt;USERIP&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2(15)
DATETIME&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; DATE
SITERESOURCE&amp;nbsp; &amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;VARCHAR2(200)&lt;/pre&gt;
&lt;br /&gt; As you might
 guess, in this example, IP address data is necessary to identify the 
geolocation of the user accessing a resource. In particular, it allows 
you to deduce the geographic location down to the region, country, city,
 and even organization the IP address belongs to. To obtain this 
information from an IP address, you might use one of a number of free or
 paid subscription geolocation databases available today. Alternatively,
 you might utilize the geolocation information provided by the user 
during his/her registration, thus relying on the information stored in 
your own database. In that case, however, you’d probably want to rely on
 the user’s id rather than the IP address.&lt;br /&gt;
 For the purpose of 
this example, we will use a free geolocation database that identifies IP
 address ranges on a country level, such as MaxMind&#39;s GeoLite Country
 database. (MaxMind also offers more accurate paid databases for country
 and city-level geolocation data.) For more details, you can check out 
the MaxMind Website.&lt;br /&gt;
 The 
GeoLite Country database is stored as a CSV file containing geographical
 data for publicly assigned IPv4 addresses, thus allowing you to 
determine the user&#39;s country based on the IP address. To take advantage 
of this database you need to download the zipped CSV file, unzip it, and
 then import the data into your data warehouse. The imported data will 
 then be joined with the Web traffic statistics data obtained from the 
flat files and the database discussed earlier in this section.&lt;br /&gt;
 Examining
 the structure of the GeoLite Country CSV file, you may notice that, 
aside from the IP ranges (each of which is assigned to a particular 
country and defined by its beginning and ending IP addresses 
in dot-decimal notation), it also includes the corresponding IP 
numbers derived from those IP addresses with the help of the following 
formula:&lt;br /&gt;
 &lt;pre&gt;IP Number = 16777216*w + 65536*x + 256*y + z
&lt;/pre&gt;
&lt;br /&gt; where&lt;br /&gt;
 &lt;pre&gt;IP Address = w.x.y.z
&lt;/pre&gt;
&lt;br /&gt; The obvious advantage of using IP numbers rather than 
direct IP addresses is that IP numbers, being regular decimal numbers, 
can be easily compared, which simplifies the task of determining to what
 country the corresponding IP address belongs. The problem, however, is 
that our traffic statistics data sources store direct IP addresses 
rather than the numbers derived from them. You will have to transform 
the Web traffic data so that the result of this transformation includes 
IP numbers rather than IP addresses.&lt;br /&gt;
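 One way to sketch that transformation in SQL (REGEXP_SUBSTR is used here purely for illustration; in practice the same arithmetic would live inside a Warehouse Builder mapping expression) is the following, run against the accesslog table shown above:&lt;br /&gt;
 &lt;pre&gt;-- illustrative only: derive the IP number from a dotted IP address
SELECT userip,
       16777216 * TO_NUMBER(REGEXP_SUBSTR(userip, &#39;[0-9]+&#39;, 1, 1))
       +   65536 * TO_NUMBER(REGEXP_SUBSTR(userip, &#39;[0-9]+&#39;, 1, 2))
       +     256 * TO_NUMBER(REGEXP_SUBSTR(userip, &#39;[0-9]+&#39;, 1, 3))
       +           TO_NUMBER(REGEXP_SUBSTR(userip, &#39;[0-9]+&#39;, 1, 4)) AS ip_number
FROM   accesslog;&lt;/pre&gt;
&lt;br /&gt;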
 The following diagram gives a graphical depiction of transforming and joining data:&lt;br /&gt;
 &lt;div style=&quot;text-align: center;&quot;&gt;
&amp;nbsp;&lt;img alt=&quot;cube-development-f2&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/359590.gif&quot; /&gt;&lt;/div&gt;
&lt;div align=&quot;center&quot;&gt;
&lt;strong&gt;Figure 2&lt;/strong&gt; Oracle Warehouse Builder extracts, transforms, and joins the source data.&amp;nbsp;&lt;/div&gt;
Remember
 that dimension and cube data are usually derived from more than one 
data source. For this example, in addition to the traffic statistics 
and geolocation data, you will also need the data sources containing the
 resource and region information. For that purpose, you might assume you
 have two database tables: RESOURCES and REGIONS. Assume the RESOURCES 
table has the following structure:&lt;br /&gt;
 &lt;pre&gt;SITERESOURCE&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2(200)&amp;nbsp;&amp;nbsp;&amp;nbsp; PRIMARY KEY
RESOURCESIZE&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; NUMBER(12)
RESOURCEGROUP&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2(10)&lt;/pre&gt;
&lt;br /&gt; And assume the REGIONS table is defined as follows:&lt;br /&gt;
 &lt;pre&gt;COUNTRYID&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2(2)&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; PRIMARY KEY
REGION&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2(2)&lt;/pre&gt;
&lt;br /&gt; The data in the above tables will be joined with the Web traffic statistics and geolocation data.&lt;br /&gt;
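 Conceptually, once the traffic rows carry IP numbers, the consolidation that the mappings will implement boils down to a join along these lines (a sketch only; TRAFFIC_STAGE is a hypothetical intermediate table holding the transformed traffic rows, GEOLOCATION_CSV_EXT is the external table defined later in this article, and STARTNUM/ENDNUM arrive as text from the GeoLite file, hence the TO_NUMBER calls):&lt;br /&gt;
 &lt;pre&gt;-- conceptual sketch; traffic_stage(siteresource, datetime, ip_number) is hypothetical
SELECT rg.region,
       g.countryid,
       r.resourcegroup,
       SUM(r.resourcesize) AS outgoing_traffic
FROM   traffic_stage t
JOIN   geolocation_csv_ext g
  ON   t.ip_number BETWEEN TO_NUMBER(g.startnum) AND TO_NUMBER(g.endnum)
JOIN   resources r  ON r.siteresource = t.siteresource
JOIN   regions   rg ON rg.countryid   = g.countryid
GROUP  BY rg.region, g.countryid, r.resourcegroup;&lt;/pre&gt;
&lt;br /&gt;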
 Now
 that you understand the structure and the meaning of your source data, 
it’s time to move on and define all the necessary data objects in the 
Warehouse Builder. We will create the objects needed for the flat files 
first. &amp;nbsp;The general steps to perform are the following:&lt;br /&gt;
 &lt;ol&gt;
&lt;li&gt;Create a new flat file module      in the project and associate it with the location where your source flat      files reside.&lt;/li&gt;
&lt;li&gt;Within the newly created flat      file module, define the flat files of interest and specify their      structure.&lt;/li&gt;
&lt;li&gt;Add
 external tables to the      target warehouse module defined as 
discussed in the preceding section,      associating those tables with 
the flat files created in the above step.&lt;/li&gt;
&lt;li&gt;Import the accesslog, resources, and regions      database tables to the target warehouse module.&lt;/li&gt;
&lt;/ol&gt;
To create the flat file module, follow these steps in the Design Center:&lt;br /&gt;
 &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      right-click the &lt;strong&gt;MY_PROJECT-&amp;gt;Files&lt;/strong&gt; node and choose &lt;strong&gt;New Flat File Module&lt;/strong&gt; in the popup menu.&lt;/li&gt;
&lt;li&gt;On
 the Name and Description      screen of the wizard, specify a name for 
the module being created, or      leave the default. Then, click &lt;strong&gt;Next&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On the Connection Information      screen, click the &lt;strong&gt;Edit… &lt;/strong&gt;button on the right of the &lt;strong&gt;Location&lt;/strong&gt; select box.&lt;/li&gt;
&lt;li&gt;In
 the Edit File System      Location dialog, specify the location where 
the flat files from which you      want to extract data can be found. 
Click &lt;strong&gt;OK&lt;/strong&gt; to come back to the wizard.&lt;/li&gt;
&lt;li&gt;On the Summary screen, click      &lt;strong&gt;Finish&lt;/strong&gt; to complete the wizard.&lt;/li&gt;
&lt;/ol&gt;
Now
 you can define a new flat file within the newly created flat file 
module. Let’s start with creating a flat file object for the access.csv 
file you saw earlier in this section:&lt;br /&gt;
 &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      right-click the &lt;strong&gt;MY_PROJECT-&amp;gt;Files-&amp;gt;FLAT_FILE_MODULE_1&lt;/strong&gt; node and      select &lt;strong&gt;New Flat File&lt;/strong&gt; to launch the Create Flat File wizard.&lt;/li&gt;
&lt;li&gt;On
 the Name and Description      screen of the wizard, specify a name for 
the flat file object being      created, say, ACCESS_CSV_FF. Then, make 
sure to      specify the physical file name. On this page, you can also 
change the      character set or accept the default presented in the 
wizard.&lt;/li&gt;
&lt;li&gt;On      the File Properties screen, make sure that the record delimiter character      is set to carriage return: &lt;strong&gt;&amp;lt;CR&amp;gt;&lt;/strong&gt;, and the field delimiter is set to      &lt;strong&gt;(,)&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On      the Record Type Properties screen, make sure that &lt;strong&gt;Single Record &lt;/strong&gt;is      selected.&lt;/li&gt;
&lt;li&gt;On
      the Field Properties screen, you’ll need to define the structure 
of the      access.csv file record, setting the SQL properties for each 
field. Please      note that the first set of properties that follows 
the Name property are      SQL*Loader properties. You don’t have to 
define those properties however,      because you’re going to use the 
external table option rather than the      SQL*Loader utility. External 
tables are the most performant way to load      flat file data into 
Oracle data warehouses. So, you’ll need to scroll      right to get to 
the second set of properties: SQL properties. Define the      properties
 as follows:    &lt;br /&gt;         &lt;br /&gt;         &lt;pre&gt;&lt;strong&gt;Name            SQL Type    SQL Length&lt;/strong&gt;
USERIP          VARCHAR2    15
DATETIME        DATE
SITERESOURCE    VARCHAR2    200&lt;/pre&gt;
&lt;/li&gt;
&lt;li&gt;On the Summary screen, click      &lt;strong&gt;Finish&lt;/strong&gt; to complete the wizard.&lt;/li&gt;
&lt;/ol&gt;
At this point, you may want to commit your changes to the repository. Select&lt;strong&gt; File-&amp;gt;Save All &lt;/strong&gt;to commit your changes.&lt;br /&gt;
     Repeat
 the above steps for the GeoIPCountryWhois.csv file that contains the 
geolocation data, defining the following properties on the Field 
Properties screen of the wizard:&lt;br /&gt;
     &lt;pre&gt;&lt;strong&gt;Name&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; &amp;nbsp;SQL Type&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; SQL Length &lt;/strong&gt;
STARTIP&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 15 
ENDIP&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 15 
STARTNUM&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 10 
ENDNUM&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 10 
COUNTRYID&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 2 
COUNTRYNAME&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; VARCHAR2&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp; 100 &lt;/pre&gt;
&lt;br /&gt;     Once these 
are done, define external table objects in the target module. These 
will expose the flat file data as tables in the database. To define an 
external table upon the ACCESS_CSV_FF flat file object created earlier, 
follow the steps below:&lt;br /&gt;
     &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL &lt;/strong&gt;node,      right-click &lt;strong&gt;External Tables&lt;/strong&gt; and select &lt;strong&gt;New External Table&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On the Name and Description      screen of the wizard, specify a name for the external table, say,      ACCESS_CSV_EXT.&lt;/li&gt;
&lt;li&gt;On the File Selection screen,      select ACCESS_CSV_FF, which you should see under      FLAT_FILE_MODULE_1.&lt;/li&gt;
&lt;li&gt;On the Locations screen, select      the location where the external table will be deployed.&lt;/li&gt;
&lt;li&gt;On the Summary screen, click      &lt;strong&gt;Finish&lt;/strong&gt; to complete the wizard.&lt;/li&gt;
&lt;/ol&gt;
Repeat the above steps for the GEOLOCATION_CSV_FF flat file object.&lt;br /&gt;
     Now
 that you have all of the necessary object definitions created, you have 
to deploy them to the target schema before you can use them. Another 
preliminary step is to make sure that the target schema in the database 
is granted the privileges to create and drop directories. For that, you 
can connect to SQL*Plus as sysdba and issue the following statements:&lt;br /&gt;
     &lt;pre&gt;GRANT CREATE ANY DIRECTORY TO owbtarget; 
GRANT DROP ANY DIRECTORY TO owbtarget; &lt;/pre&gt;
&lt;br /&gt;     After 
that, you can come back to the Design Center and proceed to deploying. 
The following steps describe how to deploy the external tables:&lt;br /&gt;
     &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;External      Tables&lt;/strong&gt; node and select both the ACCESS_CSV_EXT and GEOLOCATION_CSV_EXT nodes.&lt;/li&gt;
&lt;li&gt;Right-click the selection and      choose &lt;strong&gt;Deploy … &lt;/strong&gt;The
 process starts with compiling the selected objects and      then 
proceeds to deploying, which may take some time to complete.&lt;/li&gt;
&lt;/ol&gt;
If
 the deployment has completed successfully, this means you have the 
definitions of the external tables created in the target schema within 
the database, and, therefore, you can query those tables. To confirm 
that everything is going as planned so far, it would be a good idea to 
look at the data you can access through the newly deployed tables. The 
simplest way to do this is by selecting the &lt;strong&gt;Data…&lt;/strong&gt; command in the popup menu that appears when you right-click the node of an external table in the Project Navigator.&lt;br /&gt;
     While
 you should have no problem with the GEOLOCATION_CSV_EXT table 
containing about 140,000 rows, you may see nothing when it comes to the 
ACCESS_CSV_EXT data. The first thing you might want to check out to 
determine where the problem lies is the ACCESS_CSV_EXT’s access 
parameters, which you can access through the ALL_EXTERNAL_TABLES data 
dictionary view. Thus, being connected as sysdba to SQL*Plus, you might 
issue the following query:&lt;br /&gt;
     &lt;pre&gt;SELECT access_parameters FROM all_external_tables 
WHERE table_name =&#39;ACCESS_CSV_EXT&#39;; &lt;/pre&gt;
&lt;br /&gt;     The output should look like this:&lt;br /&gt;
     &lt;pre&gt;records delimited by newline
   &amp;nbsp;characterset we8mswin1252
   &amp;nbsp;string sizes are in bytes
   &amp;nbsp;nobadfile
   &amp;nbsp;nodiscardfile
   &amp;nbsp;nologfile
   &amp;nbsp;fields terminated by &#39;,&#39;
   &amp;nbsp;notrim
   &amp;nbsp;(&quot;USERIP&quot; char,
   &amp;nbsp; &quot;DATETIME&quot; char,
   &amp;nbsp; &quot;SITERESOURCE&quot; char
   &amp;nbsp;)&lt;/pre&gt;
&lt;br /&gt;     Examining the above, you might notice that 
the DATETIME field comes with no date mask, which may cause a problem 
when accessing the date data whose format differs from the default. The 
problem can be fixed with the following ALTER TABLE statement:&lt;br /&gt;
     &lt;pre&gt;ALTER TABLE owbtarget.access_csv_ext ACCESS PARAMETERS
   (records delimited by newline
    characterset we8mswin1252
    string sizes are in bytes
    nobadfile
    nodiscardfile
    nologfile
    fields terminated by &#39;,&#39;
    notrim
    (&quot;USERIP&quot; char,
     &quot;DATETIME&quot; char date_format date mask &quot;dd-mon-yyyy hh24:mi:ss&quot;,
     &quot;SITERESOURCE&quot; char
    )
   );&lt;/pre&gt;
&lt;br /&gt;     Now, returning to the Design Center, if you 
click the Execute Query button in the Data-ACCESS_CSV_EXT window, you 
should see the rows generated from the data derived from the access.csv 
file.&lt;br /&gt;
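     Equivalently, you can sanity-check the repaired external table straight from SQL*Plus:&lt;br /&gt;
     &lt;pre&gt;-- quick sanity check of the repaired external table
SELECT * FROM owbtarget.access_csv_ext WHERE ROWNUM &lt;= 10;&lt;/pre&gt;
&lt;br /&gt;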
     All that is left to complete the task of creating the 
data source object definitions is to import the metadata for the source 
database tables ACCESSLOG, RESOURCES, and REGIONS described earlier in 
this section. To do this, you can follow these steps:&lt;br /&gt;
     &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL&lt;/strong&gt; node and      right-click &lt;strong&gt;Tables&lt;/strong&gt;. In the popup menu, select &lt;strong&gt;Import-&amp;gt;Database Objects…      &lt;/strong&gt;to launch the Import Metadata wizard.&lt;/li&gt;
&lt;li&gt;On the Filter Information      screen of the wizard, select &lt;strong&gt;Table&lt;/strong&gt; as the type of objects you want to      import.&lt;/li&gt;
&lt;li&gt;On the Object Selection screen,      move accesslog, regions, and resources tables from the Available to the      Selected pane.&lt;/li&gt;
&lt;li&gt;On the Summary and Import      screen, click &lt;strong&gt;Finish&lt;/strong&gt; to complete the wizard.&lt;/li&gt;
&lt;/ol&gt;
As
 a result of the above steps, the ACCESSLOG, REGIONS, and RESOURCES 
objects must appear under the 
MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Tables node in 
the Projects Navigator.&lt;br /&gt;
     &lt;h3&gt;
&lt;strong&gt;Aggregating Data Across Dimensions with Cubes &lt;/strong&gt;&lt;/h3&gt;
Having
 the source object definitions created and deployed, let’s build the 
target structure. In particular, you’ll need to build a Traffic cube to 
be used for storing aggregated traffic data. Before moving on to 
building the cube, though, you’ll have to build the dimensions that will
 make up the edges of it.&lt;br /&gt;
     If you recall from the discussion 
in the beginning of the article, you need to define the following three 
dimensions to organize the data in the cube: Geography, Resource, and 
Time. The following steps describe how you might build the Geography 
dimension and then load data into it:&lt;br /&gt;
     &lt;ol&gt;
&lt;li&gt;In the Projects Navigator, right-click node &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt; TARGET_MDL-&amp;gt;Dimensions&lt;/strong&gt; and select &lt;strong&gt;New Dimension&lt;/strong&gt; in the popup menu to launch the Create Dimension wizard.&lt;/li&gt;
&lt;li&gt;On      the Name and Description screen of the wizard, type in &lt;strong&gt;GEOGRAPHY_DM&lt;/strong&gt; in the      Name field.&lt;/li&gt;
&lt;li&gt;On      the Storage Type screen, select &lt;strong&gt;ROLAP&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On      the Levels screen, enter the following levels:         &lt;pre&gt;Region
Country&lt;/pre&gt;
&lt;/li&gt;
&lt;li&gt;On the Level Attributes screen, make sure that all the level attributes for both the Region and Country levels are checked.&lt;/li&gt;
&lt;li&gt;On      the Slowly Changing Dimension screen, select &lt;strong&gt;Type1:Do not keep history.&lt;/strong&gt;&lt;/li&gt;
&lt;li&gt;After      you’re done with the wizard, you should see the &lt;strong&gt;GEOGRAPHY_DM&lt;/strong&gt; object under the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt; TARGET_MDL-&amp;gt;Dimensions&lt;/strong&gt; node in      the Project Navigator. Now, right-click it and select &lt;strong&gt;Bind&lt;/strong&gt;.
 As a result,      table GEOGRAPHY_DM_TAB should appear under the 
MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Tables      node.
 Right-click it and select &lt;strong&gt;Deploy…&lt;/strong&gt; Also, the &lt;strong&gt;GEOGRAPHY_DM_SEQ&lt;/strong&gt;
 should      appear under the      
MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Sequences node,  
    which you have to deploy too. After both deployments have been 
completed,      come back to GEOGRAPHY_DM and deploy it.&lt;/li&gt;
&lt;/ol&gt;
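     If you want to confirm that the bind and deploy steps worked, a quick look at the data dictionary in the target schema (connected as the target schema owner) shows the generated objects. A sketch; GEOGRAPHY_DM_TAB and GEOGRAPHY_DM_SEQ are the objects created above:&lt;br /&gt;
     &lt;pre&gt;-- the bound dimension table and its sequence should both be listed as VALID
SELECT object_name, object_type, status
  FROM user_objects
 WHERE object_name LIKE &#39;GEOGRAPHY_DM%&#39;;&lt;/pre&gt;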
Now you will define an ETL mapping that loads the GEOGRAPHY_DM dimension from the source data. The steps are as follows:&lt;br /&gt;
     &lt;ol&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL&lt;/strong&gt; node and      right-click &lt;strong&gt;Mappings&lt;/strong&gt;. In the popup menu, select &lt;strong&gt;New Mapping &lt;/strong&gt;to launch the      Create Mapping dialog. In this dialog, specify the mapping name, say, GEOGRAPHY_DM_MAP. After you click &lt;strong&gt;OK&lt;/strong&gt;, the      Mapping Editor canvas should appear.&lt;/li&gt;
&lt;li&gt;In the Projects Navigator, expand      the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Tables&lt;/strong&gt; node, and then drag and drop the REGIONS table to the GEOGRAPHY_DM_MAP’s      mapping canvas in the Mapping Editor.&lt;/li&gt;
&lt;li&gt;Then,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Dimensions      &lt;/strong&gt;node and drag      and drop the GEOGRAPHY_DM dimension to the mapping canvas, to the right of      the REGIONS table operator.&lt;/li&gt;
&lt;li&gt;In the mapping canvas, connect the COUNTRYID attribute of the REGIONS operator to both the COUNTRY.NAME and COUNTRY.DESCRIPTION attributes of GEOGRAPHY_DM.&lt;/li&gt;
&lt;li&gt;Similarly, connect the REGION attribute of the REGIONS operator to the REGION.NAME, COUNTRY.REGION_NAME, and REGION.DESCRIPTION attributes of GEOGRAPHY_DM.&lt;/li&gt;
&lt;li&gt;In the Projects Navigator, expand      the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Mappings&lt;/strong&gt; node. Right-click      &lt;strong&gt;GEOGRAPHY_DM_MAP&lt;/strong&gt;, and then select &lt;strong&gt;Deploy…&lt;/strong&gt; in the popup menu.&lt;/li&gt;
&lt;li&gt;The
 final step here is to load      the GEOGRAPHY_DM dimension. To do this,
 you need to execute      the GEOGRAPHY_DM_MAP      mapping. Thus, 
right-click &lt;strong&gt;GEOGRAPHY_DM_MAP&lt;/strong&gt; and select      &lt;strong&gt;Start… &lt;/strong&gt;&lt;/li&gt;
&lt;li&gt;Similarly,
 you should create and deploy the RESOURCE_DM and RESOURCE_DM_MAP 
objects, using the resources table as the source and specifying the 
following levels in the RESOURCE_DM dimension:         &lt;pre&gt;   
Group
Resource&lt;/pre&gt;
&lt;/li&gt;
&lt;li&gt;When defining the RESOURCE_DM dimension 
attributes, don’t forget to increase the length of both the NAME and 
DESCRIPTION attributes to 200, so that they can be connected with the 
SITERESOURCE attribute of the RESOURCES operator.&lt;/li&gt;
&lt;/ol&gt;
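     After the GEOGRAPHY_DM_MAP run completes, a simple row count against the bound table is an easy way to verify that the dimension actually got loaded. A sketch; GEOGRAPHY_DM_TAB is the table bound in the previous section:&lt;br /&gt;
     &lt;pre&gt;-- expect one row per dimension member loaded from the REGIONS source
SELECT COUNT(*) FROM geography_dm_tab;&lt;/pre&gt;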
Finally,
 you need to create a time dimension. The easiest way to do this is to 
use the Create Time Dimension wizard, which defines both a Time 
Dimension object and an ETL mapping to load it for you. For details on 
how to create and populate a time dimension, you can refer to the 
Creating Time Dimensions section in the Oracle Warehouse Builder Data 
Modeling, ETL, and Data Quality Guide.&lt;br /&gt;
     Once you have the dimensions set up, carry out the following steps to define a cube:&lt;br /&gt;
     &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In      the Projects Navigator, right-click node      &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Cubes&lt;/strong&gt; and      select &lt;strong&gt;New Cube &lt;/strong&gt;in the popup menu.&lt;/li&gt;
&lt;li&gt;On      the Name and Description screen of the wizard, enter the cube name in the      Name field: &lt;strong&gt;TRAFFIC&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On      the Storage Type screen, select &lt;strong&gt;ROLAP: Relational storage&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;On
      the Dimensions screen, move all the available dimensions from the 
Available      Dimensions pane to the Selected Dimensions pane, so that 
you have the      following dimensions selected:         &lt;pre&gt;   
RESOURCE_DM
GEOGRAPHY_DM
TIME_DM&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;ol start=&quot;5&quot; type=&quot;1&quot;&gt;
&lt;li&gt;On      the Measures screen, enter the following measures:         &lt;pre&gt;OUT_TRAFFIC with the data type NUMBER &lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;ol start=&quot;6&quot; type=&quot;1&quot;&gt;
&lt;li&gt;After
      the wizard is completed, the TRAFFIC cube and TRAFFIC_TAB table 
should      appear in the Project Navigator. You must deploy them before
 going any      further.&lt;/li&gt;
&lt;/ol&gt;
As with a dimension, the next step is to create a mapping defining how the source data will be loaded into the cube.&lt;br /&gt;
     &lt;h3&gt;
&lt;strong&gt;Transforming the Source Data for the Cube Loading&lt;/strong&gt;&lt;/h3&gt;
So
 now, you have to design ETL mappings that will transform the source 
data and load it into the cube. Here is the list of the transformation 
operations you’ll need to design:&lt;br /&gt;
     &lt;ol&gt;
&lt;li&gt;Combine the 
rows of the      access_csv_ext external table and the accesslog 
database table into a      single row set consolidating the traffic 
statistics data.&lt;/li&gt;
&lt;li&gt;Transform IP addresses within the traffic statistics data into corresponding IP numbers, to simplify the task of determining the &lt;strong&gt;IP address range&lt;/strong&gt; an IP address in question belongs to.&lt;/li&gt;
&lt;li&gt;Join the traffic statistics      data with the geographical data.&lt;/li&gt;
&lt;li&gt;Aggregate the joined data,      loading the output data set to the cube.&lt;/li&gt;
&lt;/ol&gt;
As mentioned, the above operations must be described in a mapping. Before moving on to creating a mapping, though, let’s define the transformation described in the second step above. It will be implemented as a PL/SQL function. The following steps describe how you might do that without leaving the Design Center:&lt;br /&gt;
     &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      expand the      &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Transformations      &lt;/strong&gt;node and right-click &lt;strong&gt;Functions&lt;/strong&gt;. In the popup menu, select &lt;strong&gt;New Function&lt;/strong&gt;.&lt;/li&gt;
&lt;li&gt;In the Create Function dialog, specify      the name for the function, say, IpToNum, and click &lt;strong&gt;OK&lt;/strong&gt;. As a result, the      Function Editor associated with the function being created is displayed.&lt;/li&gt;
&lt;li&gt;In
 the Function Editor, move on      to the Parameters tab and add 
parameter IPADD, setting data type to VARCHAR2      and I/O to Input.&lt;/li&gt;
&lt;li&gt;In the Function Editor, move on      to the Implementation tab and edit the function code as follows:         &lt;pre&gt; 
 p NUMBER;
 ipnum NUMBER;
 ipstr VARCHAR2(15);
BEGIN
 ipnum := 0;
 ipstr:=ipadd;
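 -- walk the first three octets of the dotted-quad address;
 -- each contributes octet * 256^(4-i), and whatever remains after the loop is the last octet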
 FOR i IN 1..3 LOOP
  p:= INSTR(ipstr, &#39;.&#39;, 1, 1); 
  ipnum := TO_NUMBER(SUBSTR(ipstr, 1, p - 1))*POWER(256,4-i) + ipnum;
  ipstr := SUBSTR(ipstr, p + 1);
 END LOOP;
 ipnum := ipnum + TO_NUMBER(ipstr);
 RETURN ipnum;
END;&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;ol start=&quot;5&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      right-click the newly created IPTONUM node and select &lt;strong&gt;Deploy… &lt;/strong&gt;&lt;/li&gt;
&lt;/ol&gt;
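Once the function has been deployed to the target schema, you can sanity-check it straight from SQL*Plus before wiring it into a mapping (the sample address is arbitrary):&lt;br /&gt;
     &lt;pre&gt;SELECT iptonum(&#39;192.168.10.7&#39;) FROM dual;
-- returns 192*16777216 + 168*65536 + 10*256 + 7 = 3232238087&lt;/pre&gt;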
Now you can create a mapping in which you then define how data from the source objects will be loaded into the cube:&lt;br /&gt;
     &lt;ol start=&quot;1&quot; type=&quot;1&quot;&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL&lt;/strong&gt; node and      right-click &lt;strong&gt;Mappings&lt;/strong&gt;.
 In the popup menu, select New Mapping to launch the      Create Mapping
 dialog. In this dialog, specify the mapping name:      &lt;strong&gt;TRAFFIC_MAP&lt;/strong&gt;. After you click &lt;strong&gt;OK&lt;/strong&gt;, the Mapping Editor canvas should appear.&lt;/li&gt;
&lt;li&gt;To
 accomplish the task of      combining the rows of the access_csv_ext 
and accesslog tables, first drag      and drop the ACCESS_CSV_EXT and 
ACCESSLOG table objects from the Project      Navigator to the Mapping 
Editor canvas. As a result, the operators      representing the above 
tables should appear in the canvas.&lt;/li&gt;
&lt;li&gt;From the Component 
Palette,      drag and drop the Set Operation operator to the mapping 
canvas. Then, in      the Property Inspector, set the Set operation 
property of the operator to      UNION.&lt;/li&gt;
&lt;li&gt;In the mapping 
canvas, connect      the INOUTGRP1 group of the ACCESSLOG operator to 
the INGRP1 group of the      SET OPERATION operator. As a result, all 
corresponding attributes under      those groups will be connected 
automatically.&lt;/li&gt;
&lt;li&gt;Next, connect the OUTGRP1 group      of the ACCESS_CSV_EXT operator to the INGRP2 group of the SET OPERATION      operator.&lt;/li&gt;
&lt;li&gt;The
 next task to accomplish is      joining the traffic statistics data 
with the geographical data. To begin      with, drag and drop the 
GEOLOCATION_CSV_EXT table object from the Project      Navigator to the 
mapping canvas.&lt;/li&gt;
&lt;li&gt;From the Component Palette,      drag 
and drop the Joiner operator to the mapping canvas. Then, connect the   
   OUTGRP1 group of the GEOLOCATION_CSV_EXT operator to the INGRP1 group
 of      the JOINER operator. Next, connect the OUTGRP1 group of the SET
 OPERATION      operator to the INGRP2 group of the JOINER operator.&lt;/li&gt;
&lt;li&gt;In the Projects Navigator,      expand the      &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Transformations-&amp;gt;Functions      &lt;/strong&gt;node and drag and drop the IPTONUM function to the mapping canvas.&lt;/li&gt;
&lt;li&gt;In
 the mapping canvas, select      and delete the line connecting the 
USERIP output attribute of the SET      OPERATION operator with the 
USERIP input attribute of the JOINER operator.      Connect the USERIP 
output attribute of the SET OPERATION operator with the      IPADD input
 attribute of the IPTONUM operator. Then, connect the output      
attribute of the IPTONUM operator with the USERIP input attribute of the
      JOINER operator. You also need to change the data type of the 
JOINER’s USERIP input attribute to NUMERIC. This can be done on 
the Input      Attributes tab of the Joiner Editor dialog, which you can
 invoke by      double-clicking the header of the JOINER operator.&lt;/li&gt;
&lt;li&gt;In the Joiner Editor dialog,      move on to the Groups tab and add an input group INGRP3. Then, click &lt;strong&gt;OK&lt;/strong&gt; to      close the dialog.&lt;/li&gt;
&lt;li&gt;From
 the Project Navigator,      drag and drop the RESOURCES table object to
 the Mapping Editor canvas. Then,      connect the INOUTGRP1 group of 
the RESOURCES operator with the INGRP3      group of the JOINER 
operator.&lt;/li&gt;
&lt;li&gt;Click the header of the JOINER      operator. Then move onto the JOINER Property Inspector, in which you should click      the &lt;strong&gt;Join Condition &lt;/strong&gt;button. As a result, the Expression Builder dialog      should appear, in which you build the following join condition:         &lt;pre&gt;(INGRP2.USERIP&amp;nbsp; BETWEEN&amp;nbsp; INGRP1.STARTNUM&amp;nbsp; AND&amp;nbsp; INGRP1.ENDNUM)&amp;nbsp; 
AND&amp;nbsp; 
(INGRP2.SITERESOURCE&amp;nbsp; =&amp;nbsp; INGRP3.SITERESOURCE)&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;ol start=&quot;13&quot; type=&quot;1&quot;&gt;
&lt;li&gt;Next,
 you need to add an      Aggregator that will aggregate the output of 
the Joiner operator. From the      Component Palette, drag and drop the 
Aggregator operator to the mapping      canvas.&lt;/li&gt;
&lt;li&gt;Connect the OUTGRP1 group of the JOINER operator with the INGRP1 group of the AGGREGATOR operator. Then, click the header of the AGGREGATOR operator and move on to the Property Inspector, in which you click the Ellipsis button to the right of the Group By Clause field to invoke the Expression Builder dialog. In this dialog, specify the following group by clause for the aggregator:         &lt;pre&gt;INGRP1.COUNTRYID,INGRP1.SITERESOURCE,INGRP1.DATETIME&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;ol start=&quot;15&quot; type=&quot;1&quot;&gt;
&lt;li&gt;Double-click the header of the AGGREGATOR operator and move on to the Output tab of the dialog, where you add the RESOURCESIZE attribute, specifying the following expression for it: SUM(INGRP1.RESOURCESIZE).&lt;/li&gt;
&lt;li&gt;From the Component Palette, drag and drop the Expression operator to the mapping canvas. Then, double-click the header of the EXPRESSION operator and move on to the Input Attributes tab of the dialog, in which you define the DATETIME attribute of type DATE. Then, move on to the Output Attributes tab and define the DAY_START_DAY attribute of type DATE, specifying the following expression:         &lt;pre&gt;   
TRUNC(INGRP1.DATETIME, &#39;DD&#39;)&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;ol start=&quot;17&quot; type=&quot;1&quot;&gt;
&lt;li&gt;Delete
 the line connecting the      DATETIME attribute of the JOINER operator 
with the DATETIME attribute of      the AGGREGATOR operator. Then, 
connect the JOINER’s DATETIME to the      EXPRESSION’s DATETIME and 
connect the EXPRESSION’s DAY_START_DAY to the      AGGREGATOR’s 
DATETIME.&lt;/li&gt;
&lt;li&gt;In the Projects Navigator,      expand the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Cubes&lt;/strong&gt; node and drag and drop the TRAFFIC cube object to the canvas.&lt;/li&gt;
&lt;li&gt;Connect
 the attributes in the      OUTGRP1 group of the AGGREGATOR operator 
with the TRAFFIC operator’s      attributes as follows:         &lt;pre&gt;     
 RESOURCESIZE to OUT_TRAFFIC
 COUNTRYID to GEOGRAPHY_DM_NAME 
 DATETIME to TIME_DM_DAY_START_DATE
 SITERESOURCE to RESOURCE_DM_NAME&lt;/pre&gt;
&lt;/li&gt;
&lt;/ol&gt;
By now the mapping canvas should look like the figure below:&lt;br /&gt;
     &lt;div style=&quot;text-align: center;&quot;&gt;
&amp;nbsp;&lt;img alt=&quot;cube-development-f3&quot; src=&quot;http://www.oracle.com/ocom/groups/public/@otn/documents/digitalasset/359591.gif&quot; /&gt;&lt;/div&gt;
&lt;div align=&quot;center&quot;&gt;
&lt;strong&gt;Figure 3&lt;/strong&gt; The mapping canvas , showing the TRAFFIC_MAP mapping that loads data from the source objects into the cube.&lt;/div&gt;
&lt;ol start=&quot;20&quot; type=&quot;1&quot;&gt;
&lt;li&gt;You are now ready to deploy the mapping. In the Project Navigator, right-click the &lt;strong&gt;TRAFFIC_MAP&lt;/strong&gt; object under the &lt;strong&gt;MY_PROJECT-&amp;gt;Databases-&amp;gt;Oracle-&amp;gt;TARGET_MDL-&amp;gt;Mappings&lt;/strong&gt; node and select &lt;strong&gt;Deploy…&lt;/strong&gt; This generates the code that implements the mapping logic and deploys it to the target schema.&lt;/li&gt;
&lt;li&gt;After
 the deployment has been      successfully completed, you can execute 
the mapping, starting the job for      the ETL logic defined. To do 
this, right-click the &lt;strong&gt;TRAFFIC_MAP&lt;/strong&gt; object and      select &lt;strong&gt;Start…&lt;/strong&gt;&lt;/li&gt;
&lt;/ol&gt;
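     For reference, here is what the mapping you just built logically does: the aggregated row set it hands to the TRAFFIC cube operator is roughly what the following query would produce. This is a sketch only: Warehouse Builder generates its own code, and the query assumes that the ACCESSLOG table carries the same column names as the external table and that the per-request traffic figure (RESOURCESIZE) comes from the RESOURCES table; adjust the column qualifiers to wherever that attribute actually lives in your schema.&lt;br /&gt;
     &lt;pre&gt;SELECT g.countryid,
       r.siteresource,
       TRUNC(s.datetime, &#39;DD&#39;) AS day_start_day,
       SUM(r.resourcesize)     AS out_traffic
  FROM (SELECT userip, datetime, siteresource FROM accesslog
        UNION
        SELECT userip, datetime, siteresource FROM access_csv_ext) s
  JOIN geolocation_csv_ext g
    ON iptonum(s.userip) BETWEEN g.startnum AND g.endnum
  JOIN resources r
    ON s.siteresource = r.siteresource
 GROUP BY g.countryid, r.siteresource, TRUNC(s.datetime, &#39;DD&#39;);&lt;/pre&gt;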
Once you’ve completed the above steps, you have the TRAFFIC cube populated with the data from the sources in accordance with the logic implemented in the mapping. Practically speaking, however, it is the fact table (the TRAFFIC_TAB table in this particular example) that is populated with data. In other words, cube records are stored in the fact table; the cube itself is just a logical representation, or visualization, of the dimensional data used here.&lt;br /&gt;
     Similarly, dimensions are 
physically bound to corresponding dimension tables, which store 
dimensions’ data in the database. Dimension tables are joined to the 
fact table with foreign keys, making up a model known as a star schema 
(because the diagram of such a schema resembles a star). Oracle 
Database&#39;s query optimizer can apply powerful optimization techniques 
when it comes to star queries (join queries issued against the fact 
table and the dimension tables joined to it), thus providing efficient 
query performance for the queries answering business questions.&lt;br /&gt;
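     For example, a report that asks “how much traffic did each country generate for each resource?” boils down to a star query along the following lines. This is purely illustrative: the key and attribute column names used here are hypothetical placeholders, since Warehouse Builder generates the actual names when it binds the dimensions and the cube.&lt;br /&gt;
     &lt;pre&gt;-- hypothetical column names; substitute the ones generated for your objects
SELECT g.country_name,
       r.resource_name,
       SUM(f.out_traffic) AS total_traffic
  FROM traffic_tab f
  JOIN geography_dm_tab g ON f.geography_dm_key = g.dimension_key
  JOIN resource_dm_tab r  ON f.resource_dm_key  = r.dimension_key
 GROUP BY g.country_name, r.resource_name;&lt;/pre&gt;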
     &lt;h3&gt;
&lt;strong&gt;Conclusion&lt;/strong&gt;&lt;/h3&gt;
Business intelligence, as the process of gathering information to support decision making, needs a foundation for its environment. A data warehouse provides that foundation: a relational database designed precisely for consolidating information gathered from disparate sources and for giving business users access to that information so that they can make better decisions.&lt;br /&gt;
     As you saw in 
this article, a small data warehouse may consist of a single cube and 
just a few dimensions, which make up the edges of that cube. In 
particular, you looked at an example of how traffic statistics data can 
be organized into a cube whose edges contain values for Geography, 
Resource, and Time dimensions.&lt;/div&gt;
</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/2654174067785512480/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/cube-development-for-beginners.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2654174067785512480'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2654174067785512480'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/10/cube-development-for-beginners.html' title='Cube Development for Beginners'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-3402913808572673231</id><published>2012-08-11T18:12:00.000+05:30</published><updated>2012-08-11T18:12:07.441+05:30</updated><category scheme="http://www.blogger.com/atom/ns#" term="Email-Task"/><category scheme="http://www.blogger.com/atom/ns#" term="Failure-Email"/><category scheme="http://www.blogger.com/atom/ns#" term="Informatica"/><category scheme="http://www.blogger.com/atom/ns#" term="Session-Notification"/><category scheme="http://www.blogger.com/atom/ns#" term="Success-Email"/><category scheme="http://www.blogger.com/atom/ns#" term="Workflow-Notification"/><title type='text'>Email task, Session and Workflow notification : Informatica</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
One of the advantages of using ETL tools is that functionality such as monitoring, logging, and notification is either built in or very easy to incorporate into your ETL with minimal coding. This post explains the &lt;strong&gt;&lt;em&gt;Email task&lt;/em&gt;&lt;/strong&gt;, which is part of the notification framework in Informatica. I have added some guidelines at the end on a few standard practices for using email tasks and the reasons behind them.&lt;br /&gt;

1. Workflow and session details.&lt;br /&gt;
2. Creating the Email Task (Re-usable)&lt;br /&gt;
3. Adding Email task to sessions&lt;br /&gt;
4. Adding Email Task at the Workflow Level&lt;br /&gt;
5. Emails in the Parameter file (Better maintenance, Good design).&lt;br /&gt;
6. Standard (Good) Practices&lt;br /&gt;
7. Common issues/Questions&lt;br /&gt;

&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;Workflow-and-session-details&quot;&gt;1. Workflow and session details.&lt;/a&gt;&lt;br /&gt;

Here is the sample workflow that I am using. The workflow (wkf_Test) has 2 sessions.&lt;br /&gt;

s_m_T1 : Loads data from Source to Staging table (T1).&lt;br /&gt;
s_m_T2 : Loads data from Staging (T1)  to Target (T2).&lt;br /&gt;

The actual mappings are almost irrelevant for this example, but we need at least two sessions to illustrate the different scenarios possible.&lt;br /&gt;

&lt;div class=&quot;wp-caption alignnone&quot; id=&quot;attachment_726&quot; style=&quot;width: 557px;&quot;&gt;
&lt;a href=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/wkf_test.jpg&quot;&gt;&lt;img alt=&quot;Workflow Test with the two sessions.&quot; class=&quot;size-full wp-image-726&quot; height=&quot;188&quot; src=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/wkf_test.jpg&quot; title=&quot;wkf_test&quot; width=&quot;547&quot; /&gt;&lt;/a&gt;&lt;div class=&quot;wp-caption-text&quot;&gt;
Test Workflow (2 Sessions)&lt;/div&gt;
&lt;/div&gt;
&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;creating-the-email-task&quot;&gt;2. Creating the Email Task (Re-usable)&lt;/a&gt;&lt;br /&gt;

Why re-usable? Because we’d be using the same email task for all the sessions in this workflow. &lt;br /&gt;

1. Go to Workflow Manager and connect to the repository and the folder in which your workflow is present.&lt;br /&gt;
2. Go to the Workflow Designer Tab.&lt;br /&gt;
3. Click on Workflow &amp;gt; edit (from the Menu ) and create a workflow variable as below (to hold the failure email address).&lt;br /&gt;

&lt;div class=&quot;wp-caption alignnone&quot; id=&quot;attachment_790&quot; style=&quot;width: 551px;&quot;&gt;
&lt;a href=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/Failure_Email_workflow_variable.jpg&quot;&gt;&lt;img alt=&quot;Failure Email workflow variable&quot; class=&quot;size-full wp-image-790&quot; height=&quot;227&quot; src=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/Failure_Email_workflow_variable.jpg&quot; title=&quot;Failure_Email_workflow_variable&quot; width=&quot;541&quot; /&gt;&lt;/a&gt;&lt;div class=&quot;wp-caption-text&quot;&gt;
Failure Email workflow variable&lt;/div&gt;
&lt;/div&gt;
4. Go to the “Task Developer” Tab and click create from the menu.&lt;br /&gt;
5. Select “Email Task”, enter “Email_Wkf_Test_Failure” for the name 
(since this email task is for different sessions in wkf_test).&lt;br /&gt;
   Click “Create” and then “Done”. Save changes (Repository -&amp;gt; Save or the good old ctrl+S).&lt;br /&gt;
6. Double click on the Email Task and enter the following details in the properties tab.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;xyz&quot; style=&quot;font-family: monospace;&quot;&gt;Email User Name : $$FailureEmail   (Replace the pre-populated service variable $PMFailureEmailUser, 
                                    since we will be setting this for each workflow as needed).
Email subject   : Informatica workflow ** WKF_TEST **  failure notification.
Email text      : (see below. Note that the server variables might be disabled, but will be available during run time).
Please see the attached log for details. Contact ETL_RUN_AND_SUPPORT@XYZ.COM for further information.
&amp;nbsp;
%g
Folder : %n
Workflow : wkf_test
Session : %s&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
&lt;div class=&quot;wp-caption alignnone&quot; id=&quot;attachment_786&quot; style=&quot;width: 755px;&quot;&gt;
&lt;a href=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/Create_Email_Task1.jpg&quot;&gt;&lt;img alt=&quot;Create Email Task&quot; class=&quot;size-full wp-image-786&quot; height=&quot;265&quot; src=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/Create_Email_Task1.jpg&quot; title=&quot;Create_Email_Task&quot; width=&quot;745&quot; /&gt;&lt;/a&gt;&lt;div class=&quot;wp-caption-text&quot;&gt;
Create_Email_Task&lt;/div&gt;
&lt;/div&gt;
&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;adding-email-tasks-to-sessions&quot;&gt;3. Adding Email task to sessions&lt;/a&gt;&lt;br /&gt;

7. Go to the Workflow tab and double-click session s_m_T1. You should see the “Edit Task” window.&lt;br /&gt;
8. Make sure “Fail parent if this task fails” is checked on the General tab and “Stop on errors” is set to 1 on the config tab.&lt;br /&gt;
   Then go to the “Components” tab.&lt;br /&gt;
9. For the on-failure email section, select “reusable” for type and click the LOV on Value.&lt;br /&gt;
10. Select the email task that we just created (Email_Wkf_Test_Failure), and click OK.&lt;br /&gt;

&lt;div class=&quot;wp-caption alignnone&quot; id=&quot;attachment_788&quot; style=&quot;width: 694px;&quot;&gt;
&lt;a href=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/s_m_T1_adding_email_Task1.jpg&quot;&gt;&lt;img alt=&quot;Adding Email Task to a session&quot; class=&quot;size-full wp-image-788&quot; height=&quot;410&quot; src=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/s_m_T1_adding_email_Task1.jpg&quot; title=&quot;s_m_T1_adding_email_Task&quot; width=&quot;684&quot; /&gt;&lt;/a&gt;&lt;div class=&quot;wp-caption-text&quot;&gt;
Adding Email Task to a session&lt;/div&gt;
&lt;/div&gt;
&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;adding-email-tasks-at-workflow-level&quot;&gt;4. Adding Email Task at the Workflow Level&lt;/a&gt;&lt;br /&gt;

&lt;strong&gt;Workflow-level failure/suspension email&lt;/strong&gt;.&lt;br /&gt;

If you are already implementing the failure email for each session 
(and getting the session log for the failed session), then you should 
consider just suspending the workflow. If you don’t need session level 
details, using the workflow suspension email makes sense.&lt;br /&gt;

There are two settings you need to set for Failure notification emails at workflow level.&lt;br /&gt;
  a) Suspend on error (Check)&lt;br /&gt;
  b) Suspension email (Select the email task as before). &lt;strong&gt;&lt;em&gt;Again,
 remember that if you have both session and workflow level emails, 
you’ll get two emails, if a session fails and causes the parent to fail&lt;/em&gt;&lt;/strong&gt;.&lt;br /&gt;

&lt;div class=&quot;wp-caption alignnone&quot; id=&quot;attachment_797&quot; style=&quot;width: 642px;&quot;&gt;
&lt;a href=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/workflow_suspension_email3.jpg&quot;&gt;&lt;img alt=&quot;Informatica workflow suspension email&quot; class=&quot;size-full wp-image-797&quot; height=&quot;455&quot; src=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/workflow_suspension_email3.jpg&quot; title=&quot;workflow_suspension_email&quot; width=&quot;632&quot; /&gt;&lt;/a&gt;&lt;div class=&quot;wp-caption-text&quot;&gt;
Informatica workflow suspension email&lt;/div&gt;
&lt;/div&gt;
&lt;strong&gt;Workflow Success email&lt;/strong&gt;&lt;br /&gt;

In some cases, you might have a requirement to add a success email once the entire workflow is complete.&lt;br /&gt;
This helps people know the workflow status for the day without having to
 access workflow monitor or asking run teams for the status each day. 
This is particularly helpful for business teams who are more concerned 
whether the process completed for the day.&lt;br /&gt;

1) Go to the workflow tab in workflow manager and click Task &amp;gt; Create &amp;gt; Email Task.&lt;br /&gt;
2) Enter the name of the email task and click OK.&lt;br /&gt;
3) In the General tab, select “Fail parent if this task fails”. In the Properties tab, add the necessary details.&lt;br /&gt;
   Note that the email variables are not available anymore, since they are only applicable at the session level.&lt;br /&gt;
4) Add the necessary Session.Status=SUCCEEDED link condition for all the preceding tasks.&lt;br /&gt;

Here’s how your final workflow will look.&lt;br /&gt;

&lt;div class=&quot;wp-caption alignnone&quot; id=&quot;attachment_784&quot; style=&quot;width: 805px;&quot;&gt;
&lt;a href=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/success_emails.jpg&quot;&gt;&lt;img alt=&quot;Success Emails&quot; class=&quot;size-full wp-image-784&quot; height=&quot;196&quot; src=&quot;http://www.etl-developer.com/wp-content/uploads/2011/12/success_emails.jpg&quot; title=&quot;success_emails&quot; width=&quot;795&quot; /&gt;&lt;/a&gt;&lt;div class=&quot;wp-caption-text&quot;&gt;
Informatica success emails&lt;/div&gt;
&lt;/div&gt;
&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;emails-in-parameter-files&quot;&gt;5. Emails in the Parameter file (Better maintenance, Good design).&lt;/a&gt;&lt;br /&gt;

We’ve created the workflow variable $$FailureEmail and used it in the email task. But how and when is the value assigned?&lt;br /&gt;
You can manage the failure emails by assigning the value in the &lt;strong&gt;parameter file&lt;/strong&gt;.&lt;br /&gt;
Here is my parameter file for this example. &lt;strong&gt;&lt;em&gt;You can separate multiple email addresses using commas&lt;/em&gt;&lt;/strong&gt;.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;xyz&quot; style=&quot;font-family: monospace;&quot;&gt;infa@ DEV /&amp;gt; cat wkf_test.param
[rchamarthi.WF:wkf_Test]
$$FailureEmail=rajesh@etl-developer.com
&amp;nbsp;
[rchamarthi.WF:wkf_Test.ST:s_m_T1]
$DBConnection_Target=RC_ORCL102
&amp;nbsp;
[rchamarthi.WF:wkf_Test.ST:s_m_T2]
$DBConnection_Target=RC_ORCL102&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
While it might look like a simpler approach initially, hard-coding email IDs in the email task is a bad idea. Here’s why.&lt;br /&gt;

Like every other development cycle, Informatica ETLs go through Dev, QA, and Prod, and the failure email for each environment will be different. When you promote components from Dev to QA and then to Prod, everything from mapping to session to workflow should be identical in all environments. Anything that changes or might change should be handled using parameter files (similar to env files in Unix). This also works the other way around: when you copy a workflow from Production to Development and make changes, the failure emails will not go to business users or QA teams, as the development parameter file only has the developer email IDs.&lt;br /&gt;

If you use parameter files, here is how it would be set up in different environments once.&lt;br /&gt;
After the initial set up, you’ll hardly change it in QA and Prod and migrations will never screw this up.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;xyz&quot; style=&quot;font-family: monospace;&quot;&gt;In development   : $$FailureEmail=developer1@xyz.com,developer2@xyz.com
In QA / Testing  : $$FailureEmail=developer1@xyz.com,developer2@xyz.com,QA_TEAM@xyz.com
In Production    : $$FailureEmail=IT_OPERATIONS@xyz.com,ETL_RUN@xyz.com,BI_USERS@xyz.com&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;email-task-best-practices&quot;&gt;6. Standard (Good) Practices&lt;/a&gt;&lt;br /&gt;

These are some of the standard practices related to Email Tasks that I would recommend. The reasons have been explained above.&lt;br /&gt;

a) Reusable email task that is used by all sessions in the workflow.&lt;br /&gt;
b) Suspend on error set at the workflow level and failure email specified for each session.&lt;br /&gt;
c) Fail parent if this task fails (might not be applicable in 100% of the cases).&lt;br /&gt;
c) Workflow Success email (based on requirement).&lt;br /&gt;
d) Emails mentioned only in the parameter file. (No Hard-coding). &lt;br /&gt;

&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;email-task-common-issues&quot;&gt;7. Common issues/Questions&lt;/a&gt;&lt;br /&gt;

&lt;strong&gt;Warning unused variable $$FailureEmail and/or No failure emails: &lt;/strong&gt;&lt;br /&gt;

Make sure you use the double dollar sign, as all user-defined variables require (unless you are just using the integration service variable $PMFailureEmailUser). Once that is done, the reason for the above warning and/or missing failure email could be:&lt;br /&gt;
 a) you forgot to declare the workflow variable as described in step 3 above, or&lt;br /&gt;
 b) the workflow parameter file is not being read correctly (wrong path, no read permissions, invalid parameter file entry, etc.).&lt;br /&gt;
Once you fix these two, you should be able to see the success and failure emails as expected.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/3402913808572673231/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/email-task-session-and-workflow.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3402913808572673231'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3402913808572673231'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/email-task-session-and-workflow.html' title='Email task, Session and Workflow notification : Informatica'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-1027186745072774431</id><published>2012-08-11T18:10:00.000+05:30</published><updated>2012-08-11T18:10:03.190+05:30</updated><title type='text'>Informatica Unable to fetch log</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Quite often, you might come across the following error when you try to get the session  log for your session.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;xyz&quot; style=&quot;font-family: monospace;&quot;&gt;Unable to Fetch Log.
The Log Service has no record of the requested session or workflow run.&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
The first place to start debugging this error would be one-level up from the session log, which is the &lt;strong&gt;workflow log&lt;/strong&gt;.&lt;br /&gt;

The most common reasons I have seen this happen is because of the following .&lt;br /&gt;

a )  One or more of the following parameters have been specified incorrectly.&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;Session Log File directory&lt;/li&gt;
&lt;li&gt;Session Log File Name&lt;/li&gt;
&lt;li&gt;Parameter Filename (at the session (and/or) workflow level)&lt;/li&gt;
&lt;/ul&gt;
b) You do not have the necessary privileges on the directory to create and modify (log) files.&lt;br /&gt;

Whatever the case, the workflow log is your next point of debugging. In my test scenario, I entered the following parameters for the log file name and directory to simulate this error.&lt;br /&gt;

Session Log File directory : $InvalidLogDir\&lt;br /&gt;

Session Log File Name      : s_m_test_cannot_fetch_log.log&lt;br /&gt;

When I ran the workflow, the session failed and I could not get the session log (because it was never created). The error in the workflow log is as follows.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;xyz&quot; style=&quot;font-family: monospace;&quot;&gt;Session task instance [s_m_test_cannot_fetch_log] : 
[CMN_1053 [LM_2006] Unable to create log file
[$InvalidLogDir/download/INFA/QuickHit/ParmFiles/s_m_test_cannot_fetch_log.log.bin].&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
Judging by the posts on internet forums, there seems to be a lot of guesswork involved in fixing this error. Somehow, developers seem to think of this error as something wrong with the Informatica client installation.&lt;br /&gt;

The next time you get this error, &lt;span style=&quot;text-decoration: underline;&quot;&gt;&lt;strong&gt;please check your workflow log&lt;/strong&gt;&lt;/span&gt;.&lt;br /&gt;

If you have seen this happen before for another reproducible case, 
please comment and I’ll modify the post to include the same if needed.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/1027186745072774431/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/informatica-unable-to-fetch-log.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/1027186745072774431'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/1027186745072774431'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/informatica-unable-to-fetch-log.html' title='Informatica Unable to fetch log'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-2206118637317405209</id><published>2012-08-11T18:09:00.002+05:30</published><updated>2012-08-11T18:09:25.638+05:30</updated><title type='text'>Informatica Workflow Successful : No Data in target !</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
This is a frequently asked question in the Informatica forums, and the solution is usually pretty simple. However, that will have to wait till the end, because there is one important thing that you should know before you go ahead and fix the problem.&lt;br /&gt;

&lt;strong&gt;Your workflow should have failed in the first place.&lt;/strong&gt; If this was in Production, support teams should know that something failed. Report users should know the data in the marts is not ready for reporting. Dependent workflows should wait until this is resolved. This coding practice violates the age-old principle of failing fast when something goes wrong, instead of continuing flawed execution while pretending “all is well”, which causes some of the toughest-to-debug defects.&lt;br /&gt;

Of course, this is not specific to Informatica. It is not uncommon to see code in other languages that follows this pattern. What is specific to Informatica is that this is the default behavior when you create a session, so you might have this “bug” in your code without even knowing it. &lt;br /&gt;

&lt;blockquote&gt;
Stop On Errors:&lt;br /&gt;

Indicates how many non-fatal errors the Integration Service can 
encounter before it stops the session. Non-fatal errors include reader, 
writer, and DTM errors. Enter the number of non-fatal errors you want to
 allow before stopping the session. The Integration Service maintains an
 independent error count for each source, target, and transformation. If
 you specify 0, non-fatal errors do not cause the session to stop.&lt;br /&gt;
Optionally use the $PMSessionErrorThreshold service variable to stop on 
the configured number of errors for the Integration Service. &lt;/blockquote&gt;
In Oracle, it is the infamous “when others then null” .&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;sql&quot; style=&quot;font-family: monospace;&quot;&gt;&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;BEGIN&lt;/span&gt;
  &lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;lt;&lt;/span&gt;process &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SOME&lt;/span&gt; Data&lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;gt;&lt;/span&gt;
exception
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHEN&lt;/span&gt; others 
       &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;THEN&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;NULL&lt;/span&gt;;  
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;END&lt;/span&gt;;
&lt;span style=&quot;color: #66cc66;&quot;&gt;/&lt;/span&gt;&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
In Java..Something like..&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;java&quot; style=&quot;font-family: monospace;&quot;&gt;&lt;span style=&quot;color: black; font-weight: bold;&quot;&gt;try&lt;/span&gt; &lt;span style=&quot;color: #009900;&quot;&gt;{&lt;/span&gt;
   fooObject.&lt;span style=&quot;color: #006633;&quot;&gt;doSomething&lt;/span&gt;&lt;span style=&quot;color: #009900;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #009900;&quot;&gt;)&lt;/span&gt;&lt;span style=&quot;color: #339933;&quot;&gt;;&lt;/span&gt;
&lt;span style=&quot;color: #009900;&quot;&gt;}&lt;/span&gt;
&lt;span style=&quot;color: black; font-weight: bold;&quot;&gt;catch&lt;/span&gt; &lt;span style=&quot;color: #009900;&quot;&gt;(&lt;/span&gt; &lt;span style=&quot;color: #003399;&quot;&gt;Exception&lt;/span&gt; e &lt;span style=&quot;color: #009900;&quot;&gt;)&lt;/span&gt; &lt;span style=&quot;color: #009900;&quot;&gt;{&lt;/span&gt;
   &lt;span style=&quot;color: #666666; font-style: italic;&quot;&gt;// do nothing&lt;/span&gt;
&lt;span style=&quot;color: #009900;&quot;&gt;}&lt;/span&gt;&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
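The fail-fast alternative in PL/SQL is to record whatever context you need and then re-raise, so the failure actually propagates to the caller. A minimal sketch (log_error stands in for whatever logging routine you use):&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;sql&quot; style=&quot;font-family: monospace;&quot;&gt;BEGIN
   &amp;lt;process SOME Data&amp;gt;
exception
   WHEN others
       THEN
         log_error(sqlerrm);  -- placeholder: capture the context somewhere useful
         RAISE;               -- ...then let the error propagate
END;
/&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;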
The solution to this problem in Informatica is to set a limit on the number of allowed errors for a given session, using one of the following methods.&lt;br /&gt;
a) Have “1″ in your default session config: fail the session on the first non-fatal error.&lt;br /&gt;
b) Override the session configuration details and set “Stop On Errors” to “1″ or another fixed number.&lt;br /&gt;
c) Use the $PMSessionErrorThreshold variable and set it at the integration service level. You can always override the variable in the parameter file, as shown below. Take a look at this Article on how you can do that.&lt;br /&gt;
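For example, the override could sit alongside the session entries in the parameter file. A sketch (the folder, workflow, and session names are placeholders; substitute your own):&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;xyz&quot; style=&quot;font-family: monospace;&quot;&gt;[MyFolder.WF:wkf_load.ST:s_m_load_target]
$PMSessionErrorThreshold=1&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;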

&lt;strong&gt;Remember, if your sessions do not belong to one of these categories, you are doing it wrong!&lt;/strong&gt;&lt;br /&gt;
a) Your session fails and causes the workflow to fail whenever any errors occur.&lt;br /&gt;
b) You allow the session to continue despite some (expected) errors, but you always send the .bad file and the log file to the support/business team in charge.&lt;br /&gt;

&lt;a href=&quot;http://www.blogger.com/blogger.g?blogID=4025024079216879898&quot; name=&quot;Solution&quot;&gt;Why is there no data in Target&lt;/a&gt; &lt;br /&gt;

The answer to “why the records didn’t make it to the target” is usually pretty evident in the session log file. The usual case (based on most of the times this question is asked) is that all of your records are failing with some non-fatal error.&lt;br /&gt;

The only point of this article is to remind you that your code has to
 notify the right people when the workflow did not run as planned.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/2206118637317405209/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/informatica-workflow-successful-no-data.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2206118637317405209'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2206118637317405209'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/informatica-workflow-successful-no-data.html' title='Informatica Workflow Successful : No Data in target !'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-8901814680968917808</id><published>2012-08-11T18:08:00.003+05:30</published><updated>2012-08-11T18:08:22.657+05:30</updated><title type='text'>ORA-01403: no data found</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
This is a pretty common Oracle error, raised when you try to fetch data from SQL into a PL/SQL variable and the SQL does not return any data.&lt;br /&gt;

&amp;gt;&amp;gt; Using the data from this schema&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;sql&quot; style=&quot;font-family: monospace;&quot;&gt;&amp;nbsp;
SQL&lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;gt;&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;COUNT&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;*&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; scott_emp
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHERE&lt;/span&gt; empno &lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; &lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;;
&amp;nbsp;
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;COUNT&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;*&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;
&lt;span style=&quot;color: grey; font-style: italic;&quot;&gt;----------&lt;/span&gt;
         &lt;span style=&quot;color: #cc66cc;&quot;&gt;0&lt;/span&gt;
&amp;nbsp;
SQL&lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;gt;&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;DECLARE&lt;/span&gt;
   l_ename scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;ename%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;;
   l_empno scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;empno%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt; :&lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; &lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;BEGIN&lt;/span&gt;
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; ename
     &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;INTO&lt;/span&gt; l_ename
     &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; scott_emp
     &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHERE&lt;/span&gt; empno &lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; l_empno;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;END&lt;/span&gt;;
&lt;span style=&quot;color: #66cc66;&quot;&gt;/&lt;/span&gt;
&amp;nbsp;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;DECLARE&lt;/span&gt;
&lt;span style=&quot;color: #66cc66;&quot;&gt;*&lt;/span&gt;
ERROR at line &lt;span style=&quot;color: #cc66cc;&quot;&gt;1&lt;/span&gt;:
ORA&lt;span style=&quot;color: #66cc66;&quot;&gt;-&lt;/span&gt;01403: no &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;DATA&lt;/span&gt; found
ORA&lt;span style=&quot;color: #66cc66;&quot;&gt;-&lt;/span&gt;06512: at line &lt;span style=&quot;color: #cc66cc;&quot;&gt;5&lt;/span&gt;&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
&lt;em&gt;&lt;strong&gt;What to do next&lt;/strong&gt;&lt;/em&gt;&lt;br /&gt;

1. Re-raise it with an error message that provides more context.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;sql&quot; style=&quot;font-family: monospace;&quot;&gt;&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;DECLARE&lt;/span&gt;
   l_ename scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;ename%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;;
   l_empno scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;empno%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt; :&lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; &lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;BEGIN&lt;/span&gt;
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; ename
     &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;INTO&lt;/span&gt; l_ename
     &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; scott_emp
     &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHERE&lt;/span&gt; empno &lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; l_empno;
EXCEPTION
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHEN&lt;/span&gt; no_data_found
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;THEN&lt;/span&gt; raise_application_error&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;-&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;20001&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;,&lt;/span&gt;&lt;span style=&quot;color: red;&quot;&gt;&#39;No employee exists with employee id &#39;&lt;/span&gt; &lt;span style=&quot;color: #66cc66;&quot;&gt;||&lt;/span&gt; l_empno&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;END&lt;/span&gt;;
&lt;span style=&quot;color: #66cc66;&quot;&gt;/&lt;/span&gt;
ERROR at line &lt;span style=&quot;color: #cc66cc;&quot;&gt;1&lt;/span&gt;:
ORA&lt;span style=&quot;color: #66cc66;&quot;&gt;-&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;20001&lt;/span&gt;: No employee &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;EXISTS&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WITH&lt;/span&gt; employee id &lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;
ORA&lt;span style=&quot;color: #66cc66;&quot;&gt;-&lt;/span&gt;06512: at line &lt;span style=&quot;color: #cc66cc;&quot;&gt;11&lt;/span&gt;&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
2. Suppress the error if this is a valid business scenario, and do the necessary processing.&lt;br /&gt;
   Example case: if a user has a preference to display the numbers in local currency, convert the amount; otherwise, display it in USD.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;sql&quot; style=&quot;font-family: monospace;&quot;&gt;&amp;nbsp;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;CREATE&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;OR&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;REPLACE&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;PROCEDURE&lt;/span&gt; p_calc_sales_metrics&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;
   p_user_id &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;IN&lt;/span&gt; users&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;user_id%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;,&lt;/span&gt;
   p_profit  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;IN&lt;/span&gt; net_sales&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;profit%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;
&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;AS&lt;/span&gt;
  l_pref_currency user_prefs&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;pref_currency%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;;
  l_profit_local_amt net_sales&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;profit%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;;
  l_cur_conv_factor &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;NUMBER&lt;/span&gt;;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;BEGIN&lt;/span&gt;
&amp;nbsp;
&lt;span style=&quot;color: grey; font-style: italic;&quot;&gt;---other code&lt;/span&gt;
 &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;BEGIN&lt;/span&gt;
&amp;nbsp;
 &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; pref_currency
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;INTO&lt;/span&gt; l_pref_currency
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; user_prefs
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHERE&lt;/span&gt; user_id &lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; p_user_id;
&amp;nbsp;
        l_cur_conv_factor :&lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; get_conv_rate&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: red;&quot;&gt;&#39;USD&#39;&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;,&lt;/span&gt;l_pref_currency&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;;
&amp;nbsp;
&amp;nbsp;
 exception
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHEN&lt;/span&gt; no_data_found 
    &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;THEN&lt;/span&gt; l_cur_conv_factor :&lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; &lt;span style=&quot;color: #cc66cc;&quot;&gt;1&lt;/span&gt;;
 &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;END&lt;/span&gt;;
&amp;nbsp;
&lt;span style=&quot;color: grey; font-style: italic;&quot;&gt;--- other code..&lt;/span&gt;
&amp;nbsp;
 l_profit_local_amt :&lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt;  p_profit &lt;span style=&quot;color: #66cc66;&quot;&gt;*&lt;/span&gt; l_cur_conv_factor;
&amp;nbsp;
&amp;nbsp;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;END&lt;/span&gt;;
&lt;span style=&quot;color: #66cc66;&quot;&gt;/&lt;/span&gt;&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
3. When a function that raises NO_DATA_FOUND is called from a SQL statement, the error is not propagated to the caller; the SQL statement simply returns NULL, as the example below shows.&lt;br /&gt;


&lt;div class=&quot;wp_syntax&quot;&gt;
&lt;div class=&quot;code&quot;&gt;
&lt;pre class=&quot;sql&quot; style=&quot;font-family: monospace;&quot;&gt;&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;CREATE&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;OR&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;REPLACE&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FUNCTION&lt;/span&gt; STGDATA&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;f_get_ename&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;
   i_empno &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;IN&lt;/span&gt; scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;empno%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;
&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;RETURN&lt;/span&gt; scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;ename%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;AS&lt;/span&gt;
  l_ename scott_emp&lt;span style=&quot;color: #66cc66;&quot;&gt;.&lt;/span&gt;ename%&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;TYPE&lt;/span&gt;;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;BEGIN&lt;/span&gt;
&amp;nbsp;
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; ename
    &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;INTO&lt;/span&gt; l_ename
    &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; scott_emp
   &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;WHERE&lt;/span&gt; empno &lt;span style=&quot;color: #66cc66;&quot;&gt;=&lt;/span&gt; i_empno;
&amp;nbsp;
  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;RETURN&lt;/span&gt; l_ename;  
&amp;nbsp;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;END&lt;/span&gt;;
&lt;span style=&quot;color: #66cc66;&quot;&gt;/&lt;/span&gt;
&amp;nbsp;
SQL&lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;gt;&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; f_get_ename&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;7839&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; dual;
&amp;nbsp;
F_GET_ENAME&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;7839&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;
&lt;span style=&quot;color: grey; font-style: italic;&quot;&gt;---------------------&lt;/span&gt;
KING
&amp;nbsp;
SQL&lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;gt;&lt;/span&gt;  &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; f_get_ename&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; dual;
&amp;nbsp;
F_GET_ENAME&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;
&lt;span style=&quot;color: grey; font-style: italic;&quot;&gt;-------------------------------------------&lt;/span&gt;
&amp;nbsp;
&amp;nbsp;
SQL&lt;span style=&quot;color: #66cc66;&quot;&gt;&amp;gt;&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;SELECT&lt;/span&gt; nvl&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;f_get_ename&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;,&lt;/span&gt;&lt;span style=&quot;color: red;&quot;&gt;&#39;NULL RETURNED&#39;&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt; &lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;FROM&lt;/span&gt; dual;
&amp;nbsp;
NVL&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;F_GET_ENAME&lt;span style=&quot;color: #66cc66;&quot;&gt;(&lt;/span&gt;&lt;span style=&quot;color: #cc66cc;&quot;&gt;9999&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;,&lt;/span&gt;&lt;span style=&quot;color: red;&quot;&gt;&#39;NULLRETURNED&#39;&lt;/span&gt;&lt;span style=&quot;color: #66cc66;&quot;&gt;)&lt;/span&gt;
&lt;span style=&quot;color: grey; font-style: italic;&quot;&gt;------------------------------------------------&lt;/span&gt;
&lt;span style=&quot;color: #993333; font-weight: bold;&quot;&gt;NULL&lt;/span&gt; RETURNED&lt;/pre&gt;
&lt;/div&gt;
&lt;/div&gt;
&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/8901814680968917808/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/ora-01403-no-data-found.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/8901814680968917808'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/8901814680968917808'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/ora-01403-no-data-found.html' title='ORA-01403: no data found'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-2913706740521300094</id><published>2012-08-10T13:26:00.000+05:30</published><updated>2012-08-10T13:33:55.079+05:30</updated><title type='text'>Introducing Informatica Cloud 9 – The Defining Capability For Cloud Computing</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Today we made an announcement called Informatica Cloud 9.&amp;nbsp; This is the culmination of many years of hard work and builds on the Informatica 9 announcement we made last week.&amp;nbsp; So what is so special about Informatica Cloud 9?&amp;nbsp; Is it the new Platform-as-a-Service offering?&amp;nbsp; Or is it the new Cloud Services we delivered?&amp;nbsp; Or is it the new capabilities on Amazon EC2?&amp;nbsp; What are all these things and why are they important?
&lt;br /&gt;
Let me explain:&lt;span id=&quot;more-566&quot;&gt;&lt;/span&gt;&lt;br /&gt;
Informatica Cloud 9 started over four years ago when we noticed the 
beginnings of a revolution happening around us – namely Cloud 
Computing.&amp;nbsp; Many of you may not be familiar with our work in the Cloud, 
but we have been very focused on delivering data integration as a 
“Software-as-a-Service” (SaaS) solution.&amp;nbsp; This has meant taking our 
enterprise class capabilities and simplifying the interface to an extent
 where a simple non-technical business user can point-and-click to 
connect a cloud application with an on-premise application.&lt;br /&gt;
We have always believed that this is critical to realize since 
business users typically put off the integration tasks because they 
don’t want to rely on IT for time or resources. So we wanted to make it 
incredibly easy for the business user to do this on their own.&amp;nbsp; We 
focused on salesforce.com and built data movement and then data 
synchronization capabilities.&amp;nbsp; Indeed, our Data Loader Service for 
salesforce.com was voted the best data integration solution on their 
AppExchange this year.&lt;br /&gt;
However, our belief is that while business users must be able to 
define an integration task, their IT colleagues should be able to see 
the same integration processes from within their environment.&amp;nbsp; Only then
 will the business user be able to truly bring new applications into 
critical usage.&amp;nbsp; The same needs to be true the other way around – we 
want to be able to define complex integrations and enable business users
 to be able to run them through an easy-to-use browser interface.&amp;nbsp; Only 
then will the CFO, and others, be confident that they can trust the data
 being deployed across public and private clouds and begin to embrace 
cloud computing for core business requirements.&amp;nbsp; We call this Business-IT collaboration and it was a big part of last week’s Informatica 9 announcement as well.&lt;br /&gt;
Cloud computing is redefining IT, and data integration needs to follow suit.&amp;nbsp; &lt;b&gt;It is data integration that will be THE defining capability for cloud computing&lt;/b&gt;
 – not outsourced datacenters, or sexy new application solutions.&amp;nbsp; So, 
to really embrace cloud computing, one needs a whole new way of 
delivering enterprise data integration that brings together the ease of 
use that business users require with the sophistication that must be 
delivered for IT architects. Otherwise cloud computing will simply 
remain the domain of non-critical fancy-looking applications on the 
periphery of true enterprise business requirements.&lt;br /&gt;
Informatica Cloud 9 is a significant step forward in solving this 
problem of providing data integration in the clouds.&amp;nbsp; With today’s 
announcement, anyone who is involved with Informatica can build and share 
any data integration components and deploy them anywhere. 
These components may relate to data quality or data integration or 
indeed, with time, any of the other components that make up the 
Informatica 9 Platform. After all, Informatica Cloud 9 is built on the 
comprehensive and unified Informatica 9 and therefore will eventually 
inherit the core capabilities of the platform.&lt;br /&gt;
Informatica Cloud 9 delivers on this belief and provides three critical components towards that goal:&lt;br /&gt;
&lt;ul&gt;
&lt;li&gt;With Data Quality Cloud Edition and PowerCenter Cloud Edition on 
Amazon EC2 we are providing a low-cost hourly build capability for IT 
users. With this, developers can build complex integrations between 
applications that can be published to non-technical line-of-business 
managers to consume and manage.&amp;nbsp; Indeed any of the 50,000+ developers on
 the Informatica TechNet, or any of our Systems Integration partners 
will be able to do this.&amp;nbsp; These integrations can be thought of as 
templates – picked up by anyone using the Informatica Cloud 9 Platform 
and re-deployed.&lt;/li&gt;
&lt;li&gt;With the Informatica Cloud 9 Platform-as-a-Service we are providing 
the multi-tenant, scalable enterprise engine for deploying data 
integration in the clouds. One fact you may not be familiar with – 
we are already running over 17,000 jobs a day through our multi-tenant 
Informatica Cloud Services and moving over three billion rows of client 
data a month.&lt;/li&gt;
&lt;li&gt;With our new Informatica Cloud 9 services we are enhancing our own 
simple-to-use suite of data integration cloud applications that continue
 to evolve the role of the business user to be self-sufficient in their 
approach to accessing and integrating trustworthy cloud-based and 
on-premise data.&lt;/li&gt;
&lt;/ul&gt;
Now an enterprise can enable business users to use any cloud 
application and remain in control of their most critical asset – their 
data.&amp;nbsp; Developers can share re-usable templates across the business; 
System Integrators can build data integration templates for specific 
cloud and on-premise applications and deploy them across to their 
clients; consultants can move from client to client with toolboxes of 
pre-configured templates.&lt;br /&gt;
Informatica Cloud 9 is the evolution of enterprise data integration to the clouds.&amp;nbsp; Take a few moments please to re-read the press release and, in particular, the quotes therein:&lt;br /&gt;
&lt;ul&gt;
&lt;li&gt;“Informatica Cloud 9 will dramatically simplify cloud-to-cloud and cloud to on-premise data integrations…”&lt;/li&gt;
&lt;li&gt;“… the ability to develop more complex mappings and workflows and 
run them as custom services for line of business managers will allow us 
to continue to provide self-service, while IT remains in control…”&lt;/li&gt;
&lt;li&gt;“… we’ve developed an SAP data integration as a service solution…”&lt;/li&gt;
&lt;li&gt;“… we plan to develop re-usable templates to accelerate time to market and reduce total cost of ownership for our customers… “&lt;/li&gt;
&lt;li&gt;“Informatica Cloud Platform gives us the power and flexibility to 
meet enterprise requirements and deliver solutions to non-technical 
business users …”&lt;/li&gt;
&lt;/ul&gt;
Hopefully now you can see why we are all on Informatica Cloud 9 here!&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/2913706740521300094/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/0-introducing-informatica-cloud-9.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2913706740521300094'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2913706740521300094'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/0-introducing-informatica-cloud-9.html' title='Introducing Informatica Cloud 9 – The Defining Capability For Cloud Computing'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-3552547039145228439</id><published>2012-08-10T13:24:00.004+05:30</published><updated>2012-08-10T13:24:37.277+05:30</updated><title type='text'>How Big Data Changes Data Integration</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
With Big Data systems now in the mix within most enterprises, those 
charged with data integration are interested in how their world will 
soon change. Rest assured, most of the patterns of integration that we 
deal with today will still be around for years to come.&lt;br /&gt;

However, there are some clear trends that data integration managers need to understand, such as:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;The ability to apply structure to the data at the time of use.&lt;/li&gt;
&lt;li&gt;The ability to store both structured and unstructured data.&lt;/li&gt;
&lt;li&gt;The need for faster data integration technology.&lt;span id=&quot;more-10592&quot;&gt;&lt;/span&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;strong&gt;&lt;em&gt;The ability to apply structure to the data at the time of use&lt;/em&gt;&lt;/strong&gt;
 refers to the fact that Big Data systems built on the Hadoop set of 
technologies can add a structure at the time of use, often called 
schema-on-read. Thus, you don’t need to pre-define a structure as we do 
in the world of relational data; you can map a structure onto existing data.&lt;br /&gt;
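To make the idea concrete, here is a minimal, hypothetical sketch of schema-on-read in Hive-flavoured SQL; the table name, columns and file location are invented for illustration, and the raw files are assumed to already sit in the Hadoop file system:&lt;br /&gt;
&lt;pre style=&quot;font-family: monospace;&quot;&gt;-- Map a structure onto raw files that already exist; no data is moved or reloaded.
CREATE EXTERNAL TABLE web_clicks (
  click_time  STRING,
  user_id     STRING,
  url         STRING
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY &#39;\t&#39;
STORED AS TEXTFILE
LOCATION &#39;/data/raw/web_clicks&#39;;

-- The structure is applied only when the data is read.
SELECT user_id, COUNT(*) AS clicks
  FROM web_clicks
 GROUP BY user_id;&lt;/pre&gt;
The same files could later be mapped with a different column layout, which is exactly the flexibility, and the integration complexity, discussed in this post.&lt;br /&gt;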

While this has certain advantages, such as the ability to create 
dynamic structure around in-line analytical services, this also causes 
some complexity when dealing with data integration technology. Most data
 integration technology leverages some type of structure on either end 
of the integration flow. The idea is that you need to layer a structure 
as the data is consumed, translated, and produced from one system or 
data store to another.&lt;br /&gt;

&lt;strong&gt;&lt;em&gt;The ability to store both structured and unstructured data&lt;/em&gt;&lt;/strong&gt;,
 as related to the layering in a dynamic structure, brings both 
complexity and flexibility. Big Data systems are basically file systems 
with anything and everything stored in them. This means that documents, 
text, and data are all intermingled. This information may be bound to a 
structure, or freestanding.&amp;nbsp; In any event, you need to provide the 
ability to move both structured and unstructured data from store to 
store.&lt;br /&gt;

&lt;strong&gt;&lt;em&gt;The need for faster data integration technology&lt;/em&gt;&lt;/strong&gt;
 is a result of the fact that we deal with much larger volumes of data 
than more traditional enterprise systems. Therefore, there is more data 
that has to be moved from data store to data store. Thus, there is a 
renewed focus on data integration technology’s ability to keep up with 
the data integration performance requirements.&lt;br /&gt;

In many respects, the ability to create a data integration solution 
that is able to move larger volumes of structured and unstructured data 
between data stores is dependent upon the way you’ve designed the data 
integration flows, as much as the data integration technology itself. As
 Big Data systems move into your enterprise, and you join them together 
using data integration technology, you’ll find that the patterns of the 
integration flows need to change as well. Before these systems are put 
into production, it’s a good idea to review what needs to change and 
best practices around the design of the integration flows.&lt;br /&gt;

Big Data is more of an evolution around the way we store and deal 
with data. It provides more primitive commodity mechanisms that provide 
more flexibility and the ability to deal with larger amounts of data 
using highly distributed data management technology. Data integration 
technology needs to adapt to this change, which is further reaching than
 anything we’ve seen of late.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/3552547039145228439/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/how-big-data-changes-data-integration.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3552547039145228439'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/3552547039145228439'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/how-big-data-changes-data-integration.html' title='How Big Data Changes Data Integration'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-8751860416581794098</id><published>2012-08-10T13:22:00.001+05:30</published><updated>2012-08-10T13:23:10.222+05:30</updated><title type='text'>How Integration Platform-as-a-Service Impacts Cloud Adoption</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Did you know that Forrester estimates in their 10 Cloud Predictions For 2012
 blog post that on average organizations will be running more than 10 
different cloud applications and that the public Software-as-a-Service 
(SaaS) market will hit $33 billion by the end of 2012?&lt;br /&gt;
However, in the same post, Forrester also acknowledged that SaaS 
adoption is led mainly by Customer Relationship Management (CRM), 
procurement, collaboration, and Human Capital Management (HCM) software 
and that all other software segments will “still have significantly 
lower SaaS adoption rates”. It’s not hard to see this in the market 
today, with cloud juggernaut salesforce.com leading the way in CRM,
 and Workday and SuccessFactors doing battle in HCM, for example. 
Forrester claims that amongst the lesser known software segments, 
Product Lifecycle Management (PLM), Business Intelligence (BI), and 
Supply Chain Management (SCM) will be the categories to break through as
 far as SaaS adoption is concerned, with approximately 25% of companies using these solutions by 2012.&lt;span id=&quot;more-10643&quot;&gt;&lt;/span&gt;&lt;br /&gt;
I am not at all surprised that CRM, and HCM are leading the way as 
far as SaaS application adoption is concerned. One only needs to examine
 the reason behind why these categories took off. During the so-called 
“Great Recession,” companies wanted an efficient way in which to grow 
revenues and cut costs. On the revenue side of the equation, companies 
found that sales force automation (SFA) helped them close more deals 
faster, and increased customer visibility allowed them to focus on 
customer retention as well as potential upsell and cross-sell 
opportunities. On the costs side, some have argued that functions such 
as HR and Talent Management were the first to be moved to the cloud as 
they were considered “non-core”.&lt;br /&gt;
As data volumes, global deployment, and end-user adoption grew, it 
became increasingly clear that out-of-the-box CRM or HCM functionality 
was not going to cut it, and that customization options would be 
necessary. Out of this necessity evolved the world of Platform-as-a-Service (PaaS). Similar to the concept of SaaS, a PaaS environment
 involves built-in scalability, reliability, security, databases, 
interfaces to web services and a container with development tools for 
building custom apps. Salesforce.com was one of the early creators of 
this new cloud ecosystem with its Force.com platform. This platform, which now numbers over 220,000 apps
 (as of the publication of this blog post) provides numerous options to 
customize a CRM deployment as well as build websites, and numerous 
productivity-enhancing and vertical specific apps from the ground up 
that tied into the core CRM functionality.&lt;br /&gt;
While the PaaS ecosystem provides a great avenue to build custom apps
 and increase cloud application adoption, too often it is tied into the 
code-base of the dominant SaaS player that brought it into existence. As
 a result, other non-CRM and non-HCM functions such as PLM, SCM, BI, and
 ERP still largely remain in the on-premises world.&lt;br /&gt;
This is where iPaaS, or integration PaaS
 comes into play. Each of these other non-CRM functions is an important 
part of the value chain, whether upstream, or downstream. PLM and SCM 
systems for instance interact frequently with ERP systems. BI and 
analytics software have multiple touch points with all these systems. 
Integrating all these systems together and tying them to specific 
customer records in the CRM system has been such a time-consuming task 
that most SaaS providers simply chant the mantra of “web services” when 
asked by customers how they can connect various SaaS ecosystems 
together. Web services typically accomplish a very specific business 
process and specific task between two different SaaS applications, and 
the web services APIs do not lead to repeatability. In fact, a Slashdot 
blog on the API economy mentioned that there were some 5,000 APIs estimated by the end of 2012 and some 30,000 estimated in the next four years.&lt;br /&gt;
The proliferation of APIs along with SaaS adoption only strengthens 
the need for an integration PaaS that abstracts the underlying 
orchestrations of these APIs to end users. An integration PaaS allows developers to build full (or partial, if desired) native connectivity
 to every single object within an application, whether SaaS or not. By 
building native connectors, every permutation and combination of objects
 between different SaaS applications is possible, thereby increasing the
 possibility for companies to choose those SaaS apps that fit their 
business function or department. With increasing confidence of the 
existence of the integration PaaS, companies will continue to adopt SaaS
 apps in other LOBs, and not just the mainstream CRM or HCM categories. 
This in turn spurs the other SaaS category providers to invest more in 
R&amp;amp;D and come out with even more advanced functionality.&lt;br /&gt;
With the increased innovation occurring across all SaaS applications,
 we can expect more and more complex use cases involving larger amounts 
of data. All of this coupled with custom apps built on competing PaaS 
platforms will only further increase the use of an integration PaaS to 
achieve cloud data integration.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/8751860416581794098/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/0-how-integration-platform-as-service.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/8751860416581794098'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/8751860416581794098'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/0-how-integration-platform-as-service.html' title='How Integration Platform-as-a-Service Impacts Cloud Adoption'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-7956036373767872838</id><published>2012-08-10T13:20:00.005+05:30</published><updated>2012-08-10T13:20:57.255+05:30</updated><title type='text'>Delivering IT Value with Master Data Management and the Cloud</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Over the last few years most enterprises have implemented several (if
 not more) large ERP and CRM suites. Although these applications were 
meant to have self-contained data models, it turns out that many 
enterprises still need to manage “master data” between the various 
applications. So the traditional IT role of hardware administration and 
custom programming has evolved to packaged application implementation 
and large scale data management. &amp;nbsp;According to Wikipedia:
 “MDM has the objective of providing processes for collecting, 
aggregating, matching, consolidating, quality-assuring, persisting and 
distributing such data throughout an organization to ensure consistency 
and control in the ongoing maintenance and application use of this 
information.” Instead of designing large data warehouses to maintain the
 master data, many organizations turn to packaged Master Data Management
 (MDM) packages (such as Informatica MDM).
 With these tools at hand, IT shops can then build true Customer Master,
 Product Master (Product Information Management – PIM), Employee, or 
Supplier Master solutions.&lt;span id=&quot;more-10236&quot;&gt;&lt;/span&gt;&lt;br /&gt;

MDM solutions vary by industry in terms of tactical approaches taken –
 e.g., pharmaceutical/life sciences will adopt semi-batch, 
database-centric approaches for master physician data to be deployed to 
sales forces, while financial services providers and online retailers 
will require near real-time, business process-centric solutions to 
compete in the business-to-consumer (B2C) online world. These different 
types of implementations require technical IT expertise in delivering an
 end-to-end solution. &amp;nbsp;Based on quarterly surveys of the MDM Institute 
Business Council™ (8,000+ subscribers to the MDM Alert newsletter 
engaged in MDM projects), the perennial top four business drivers for 
MDM initiatives are summarized as:&lt;br /&gt;

(1)&amp;nbsp;&amp;nbsp;&amp;nbsp; compliance and regulatory reporting;&lt;br /&gt;

(2)&amp;nbsp;&amp;nbsp;&amp;nbsp; economies of scale for mergers and acquisitions (M&amp;amp;A);&lt;br /&gt;

(3)&amp;nbsp;&amp;nbsp;&amp;nbsp; synergies for cross-sell and up-sell;&lt;br /&gt;

(4)&amp;nbsp;&amp;nbsp;&amp;nbsp; legacy system integration and augmentation.&lt;br /&gt;

Note that this list represents &lt;strong&gt;business drivers&lt;/strong&gt;, not
 technical initiatives. Ideally, the business analyst “owns” the data 
and is responsible for the initial definition of what the master data 
looks like (whether this is from a custom application or a packaged 
solution). In addition, they are responsible for the processes (not 
actual data entry) of inputting the data into the source systems. IT 
acts as “data stewards” – coordinators between various business groups. 
IT’s role should be that of project managers who phase in updates to the 
primary MDM. These data stewards must be equally savvy in data modeling 
as well as business processes. &amp;nbsp;IT must also be the technical gurus to 
glue applications and databases together. This also involves data 
quality processes, such as standardization, cleansing, validation, 
enrichment and matching.&lt;br /&gt;
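As a rough illustration of the standardization and matching steps mentioned above, here is a minimal SQL sketch; the stg_customer staging table and the exact-match-on-email rule are hypothetical, and real MDM tools apply far richer fuzzy-matching and survivorship logic:&lt;br /&gt;
&lt;pre style=&quot;font-family: monospace;&quot;&gt;-- Hypothetical staging table: stg_customer(cust_id, full_name, email)
-- Standardize the matching key (cleansing), then pair up likely duplicates (matching).
SELECT a.cust_id AS survivor_id,
       b.cust_id AS duplicate_id
  FROM stg_customer a
  JOIN stg_customer b
    ON UPPER(TRIM(a.email)) = UPPER(TRIM(b.email))
   AND a.cust_id &amp;lt; b.cust_id;  -- avoid self-pairs and mirrored pairs&lt;/pre&gt;
&lt;br /&gt;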

Traditional MDM solutions have been implemented on premise, primarily
 as data hubs to various applications spokes such as Human Resources, 
PLM, ERP, and CRM applications. With the huge uptick of software as a 
service (SaaS) CRM providers such as salesforce.com, this requires MDM 
solutions to integrate data from the cloud.&lt;br /&gt;

While an on-premise model works well when most of the data is updated within the “four walls” of the enterprise, a &lt;strong&gt;hybrid cloud + on premise&lt;/strong&gt;
 model may be better suited to a B2C environment when massive customer 
updates happen on a seasonal basis. In this case, a hybrid model will 
allow for extra cloud resources to be tapped in order to increase 
performance. In addition, with a hybrid model, sensitive data that may 
be legally prohibited from residing in the cloud can be kept on premise.&lt;br /&gt;

&lt;strong&gt;&lt;em&gt;Should MDM be completely implemented in the cloud?&lt;/em&gt;&lt;/strong&gt;&lt;br /&gt;

In this case, the master data model engine will reside in the cloud 
and will act as a hub between multiple SaaS applications and potentially
 on premise applications. A common scenario might be managing customer 
data between Salesforce CRM, Order fulfillment with UPS services, and 
on-premise ERP Receivables. Or replace the on-premise ERP solution with a
 cloud-based ERP such as NetSuite. In these cases, having MDM in the 
cloud might be the right approach. A cloud-based solution also makes 
sense for piloting a longer term MDM project. So look for a vendor that 
provides both on-premise &lt;span style=&quot;text-decoration: underline;&quot;&gt;and&lt;/span&gt; cloud-based MDM solutions for maximum deployment flexibility.&lt;br /&gt;

The Hybrid IT organization continues to evolve with new 
responsibilities. Cloud-based solutions tend to free up the IT staff 
from the more routine data center operations to get more involved with 
business activities such as Master Data Management. IT will play an 
 important role in managing MDM solutions. Although they don’t “own” the
 data, the technical requirements for implementing a solution remain in 
the IT domain. And acting as a data steward to capture the business 
requirements of what data needs to be managed and formulate the detailed
 rules and processes will become a key role. IT will also need to decide
 between on-premise and cloud-based architectures for the enterprise.&lt;br /&gt;

—-&lt;br /&gt;

Mercury Consulting is a trusted technology advisor with deep 
expertise in cloud applications. We offer strategic guidance to senior 
executives to select the right cloud solution and services assistance to
 help enterprises accelerate their adoption of cloud solutions.&lt;br /&gt;

Mike Canniff
 is a faculty member of Management Information Systems at the University
 of Pacific – Eberhardt School of Business. He has worked in the 
Information Technology field for over 20 years beginning with IBM as a 
software engineer and as Vice President, Development for Acuitrek 
Software. Mike has specialized his career research in the areas of 
Enterprise Application Integration and Electronic Commerce systems. He 
has published several papers on Electronic Commerce and Business Process
 Management best practices.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/7956036373767872838/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/delivering-it-value-with-master-data.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/7956036373767872838'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/7956036373767872838'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/delivering-it-value-with-master-data.html' title='Delivering IT Value with Master Data Management and the Cloud'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-5264111076265477407</id><published>2012-08-10T13:19:00.002+05:30</published><updated>2012-08-10T13:19:22.387+05:30</updated><title type='text'>Electronic Trading Systems Moving to the Cloud</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
More and more business applications are moving from the desktop to 
the cloud, and electronic trading applications are no different.&lt;br /&gt;

Over the last five or ten years, application vendors have established
 several advantages of running major applications, even mission-critical
 applications like salesforce.com, over the cloud.&lt;br /&gt;

These advantages include:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;Easier and smoother upgrades, which provides much better 
adaptability and agility in the face of changing market and business 
conditions, plus a better user experience,&lt;/li&gt;
&lt;li&gt;Better scalability, with newer technology advances, and&lt;/li&gt;
&lt;li&gt;Better portability across a wide array of device types, including smartphones and tablets (especially in the last 2-3 years).&lt;/li&gt;
&lt;/ul&gt;
Recent improvements in Web technology, such as HTML5 WebSockets, are 
helping to speed this transition along by providing several throughput 
and latency advantages over earlier iterations of Web technology, and 
even over native Windows applications. Now, application architects can 
freely choose the technology that provides a better path for growth, 
agility, and scalability, which is often a Cloud-based solution.&lt;br /&gt;

As I write this, a few of our customers who provide electronic 
trading solutions to their clients are making the strategic move to 
develop a next generation application based in the Cloud. The main 
driver for one customer was to be able to take on more clients more 
quickly and therefore grow the business faster by increasing marginal 
revenue and profitability. They found the list of challenges with a
 thick desktop client to be just too big for growing the business as 
quickly as they wanted to — or needed to.&lt;br /&gt;

Messaging middleware, especially peer-to-peer solutions such as Informatica Ultra Messaging,
 can be a very important piece of a Cloud-based application. The 
peer-to-peer “nothing in the middle” model provides applications not 
just ultra-high performance (whether for high throughput or low 
latency), but also near-linear scalability, true 24×7 reliability and 
availability, and business and IT agility. These qualities tie directly 
to the advantages listed above.&lt;br /&gt;

Cloud-based applications, of course, must also contend with the 
Internet and all that comes with that: support for various browsers and 
platforms (and versions of each), scalability and bandwidth issues, and 
mobile devices like smartphones and tablets. New web technologies like 
HTML5 WebSockets from Kaazing
 are best positioned to take care of the path from server to the 
smartphone or tablet, and with JMS connectivity to Ultra Messaging on 
the back end, can provide a Cloud-based application with a lean, 
scalable and agile infrastructure, usually with less hardware.&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/5264111076265477407/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/electronic-trading-systems-moving-to.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/5264111076265477407'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/5264111076265477407'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/08/electronic-trading-systems-moving-to.html' title='Electronic Trading Systems Moving to the Cloud'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-5288812538589387735</id><published>2012-05-29T21:55:00.005+05:30</published><updated>2012-05-29T21:55:53.916+05:30</updated><title type='text'>SAP HANA Architecture</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
In this article we will discuss about the architecture overview of the 
In-Memory Computing Engine of SAP HANA. The SAP HANA database is 
developed in C++ and runs on SUSE Linux Enterprise Server. SAP HANA 
database consists of multiple servers and the most important component 
is the &lt;b&gt;Index Server&lt;/b&gt;. SAP HANA database consists of Index Server, Name Server, Statistics Server, Preprocessor Server and XS Engine.

 

&lt;br /&gt;
&lt;div&gt;


&lt;/div&gt;
&lt;ol&gt;
&lt;li&gt; &lt;b&gt;Index Server&lt;/b&gt; contains the actual data and the engines for processing the data. It also coordinates and uses all the other servers.

&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Name Server&lt;/b&gt; holds information about the SAP HANA 
database topology. This is used in a distributed system with instances of
 HANA database on different 

hosts. The name server knows where the components are running and which 
data is located on which server.

&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Statistics Server&lt;/b&gt; collects information about Status, 
Performance and Resource Consumption from all the other server 
components. From the SAP HANA Studio we can access the Statistics Server
 to get status of various alert monitors.

&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Preprocessor Server&lt;/b&gt; is used for analysing text data and extracting the information on which the text search capabilities are based.

&lt;/li&gt;
&lt;li&gt; &lt;b&gt;XS Engine&lt;/b&gt; is an optional component. Using XS Engine clients can connect to SAP HANA database to fetch data via HTTP.
&lt;/li&gt;
&lt;/ol&gt;
Now let us check the architecture components of SAP HANA Index Server.
&lt;br /&gt;


&lt;h3&gt;
SAP HANA Index Server Architecture:&lt;/h3&gt;
&lt;div align=&quot;center&quot;&gt;

&lt;a href=&quot;http://nonpng.dwbiconcepts.com/images/dbase/hana/Architecture.jpg&quot;&gt;  
&lt;img alt=&quot;Block Diagram of Engine Components&quot; class=&quot;caption&quot; src=&quot;http://nonpng.dwbiconcepts.com/images/dbase/hana/Architecture-s.jpg&quot; title=&quot;Block Diagram of Engine Components&quot; /&gt;
&lt;/a&gt;
&lt;/div&gt;
&lt;ol&gt;
&lt;li&gt; &lt;b&gt;Connection and Session Management&lt;/b&gt; component is responsible 
for creating and managing sessions and connections for the database 
clients. Once a session is established, clients can communicate with the
 SAP HANA database using SQL statements. For each session a set of 
parameters are maintained like, &lt;b&gt;auto-commit&lt;/b&gt;, current transaction &lt;b&gt;isolation level&lt;/b&gt; etc. Users are &lt;b&gt;Authenticated&lt;/b&gt;
 either by the SAP HANA database itself (login with user and password) 
or authentication can be delegated to an external authentication 
provider such as an LDAP directory.

&lt;/li&gt;
&lt;li&gt; The client requests are analyzed and executed by the set of components summarized as &lt;b&gt;Request Processing And Execution Control&lt;/b&gt;.
 The Request Parser analyses the client request and dispatches it to the
 responsible component. The Execution Layer acts as the controller that 
invokes the different engines and routes intermediate results to the 
next execution step. 

For example, Transaction Control statements are forwarded to the 
Transaction Manager. Data Definition statements are dispatched to the 
Metadata Manager and Object invocations are forwarded to Object Store. 
Data Manipulation statements are forwarded to the Optimizer which 
creates an Optimized Execution Plan that is subsequently forwarded to 
the execution layer.
&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt; The &lt;b&gt;SQL Parser&lt;/b&gt; checks the syntax and semantics of the 
client SQL statements and generates the Logical Execution Plan. Standard
 SQL statements are processed directly by the database engine.
    
    &lt;/li&gt;
&lt;li&gt; The SAP HANA database has its own scripting language named &lt;b&gt;SQLScript&lt;/b&gt;
 that is designed to enable optimizations and parallelization. SQLScript
 is a collection of extensions to SQL. SQLScript is based on side effect
 free functions that operate on tables using SQL queries for set 
processing. The motivation for SQLScript is to offload data-intensive 
application logic into the database (a brief, hypothetical SQLScript sketch appears after this architecture list).

    &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Multidimensional Expressions&lt;/b&gt; (MDX) is a language for querying and manipulating the multidimensional data stored in OLAP cubes.
    
    &lt;/li&gt;
&lt;li&gt; The SAP HANA database also contains a component called the &lt;b&gt;Planning Engine&lt;/b&gt;
 that allows financial planning applications to execute basic planning 
operations in the database layer. One such basic operation is to create a
 new version of a dataset as a copy of an existing one while applying 
filters and transformations. For example: Planning data for a new year 
is created as a copy of the data from the previous year. This requires 
filtering by year and updating the time dimension. Another example for a
 planning operation is the disaggregation operation that distributes 
target values from higher to lower aggregation levels based on a 
distribution function.
    
    &lt;/li&gt;
&lt;li&gt; The SAP HANA database also has built-in support for 
domain-specific models (such as for financial planning) and it offers 
scripting capabilities that allow application-specific calculations to 
run inside the database. 
&lt;/li&gt;
&lt;/ul&gt;
The SAP HANA database features such as SQLScript and Planning 
operations are implemented using a common infrastructure called the &lt;b&gt;Calc engine&lt;/b&gt;.
 The SQLScript, MDX, Planning Model and Domain-Specific models are 
converted into Calculation Models. The Calc Engine creates the Logical 
Execution Plan for &lt;b&gt;Calculation Models&lt;/b&gt;. The Calculation Engine 
breaks up a model, for example some SQLScript, into operations that
 can be processed in parallel. The engine also executes the user defined
 functions.
&lt;br /&gt;


&lt;/li&gt;
&lt;li&gt; In HANA database, each SQL statement is processed in the 
context of a transaction. New sessions are implicitly assigned to a new 
transaction. The &lt;b&gt;Transaction Manager&lt;/b&gt; coordinates database 
transactions, controls transactional isolation and keeps track of 
running and closed transactions. When a transaction is committed or 
rolled back, the transaction manager informs the involved engines about 
this event so they can execute necessary actions. The transaction 
manager also cooperates with the persistence layer to achieve atomic and
 durable transactions.

&lt;/li&gt;
&lt;li&gt; Metadata can be accessed via the &lt;b&gt;Metadata Manager&lt;/b&gt;. The 
SAP HANA database metadata comprises a variety of objects, such as 
definitions of relational tables, columns, views, and indexes, 
definitions of SQLScript functions and object store metadata. Metadata 
of all these types is stored in one common catalog for all SAP HANA 
database stores (in-memory row store, in-memory column store, object 
store, disk-based). Metadata is stored in tables in row store. The SAP 
HANA database features such as transaction support, multi-version 
concurrency control, are also used for metadata management. In 
distributed database systems central metadata is shared across servers. 
How metadata is actually stored and shared is hidden from the components
 that use the metadata manager.

&lt;/li&gt;
&lt;li&gt; The &lt;b&gt;Authorization Manager&lt;/b&gt; is invoked by other SAP HANA 
database components to check whether the user has the required 
privileges to execute the requested operations. SAP HANA allows granting
 of privileges to users or roles. A privilege grants the right to 
perform a specified operation (such as create, update, select, execute, 
and so on) on a specified object (for example a table, view, SQLScript 
function, and so on). 


The SAP HANA database supports &lt;b&gt;Analytic Privileges&lt;/b&gt; that represent
 filters or hierarchy drilldown limitations for analytic queries. 
Analytic privileges grant access to values with a certain combination of
 dimension attributes. This is used to restrict access to a cube with 
some values of the dimensional attributes.
&lt;br /&gt;


&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Database Optimizer&lt;/b&gt; gets the &lt;b&gt;Logical Execution Plan&lt;/b&gt; from the SQL Parser or the Calc Engine as input and generates the optimised &lt;b&gt;Physical Execution Plan&lt;/b&gt; based on the database &lt;b&gt;Statistics&lt;/b&gt;. The database optimizer determines the best plan for accessing row or column stores.

&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Database Executor&lt;/b&gt; basically executes the Physical 
Execution Plan to access the row and column stores and also process all 
the intermediate results. 

&lt;/li&gt;
&lt;li&gt; The &lt;b&gt;Row Store&lt;/b&gt; is the SAP HANA database row-based 
in-memory relational data engine. It is optimized for high write 
performance and is interfaced from the calculation / execution layer. 
Optimized write and read operations are possible because storage is 
separated into Transactional Version Memory &amp;amp; a Persisted Segment.  


&lt;a href=&quot;http://nonpng.dwbiconcepts.com/images/dbase/hana/Row-Store.jpg&quot;&gt;  
&lt;img alt=&quot;Row Store Block Diagram&quot; class=&quot;caption&quot; src=&quot;http://nonpng.dwbiconcepts.com/images/dbase/hana/Row-Store-s.jpg&quot; title=&quot;Row Store Block Diagram&quot; /&gt;
&lt;/a&gt;
&lt;br /&gt;


&lt;ul&gt;
&lt;li&gt; &lt;b&gt;Transactional Version Memory&lt;/b&gt; contains temporary versions
 i.e. Recent versions of changed records. This is required for 
Multi-Version Concurrency Control (MVCC). Write Operations mainly go 
into Transactional Version Memory. An INSERT statement also writes to the 
Persisted Segment.
   
    &lt;/li&gt;
&lt;li&gt; &lt;b&gt;Persisted Segment&lt;/b&gt; contains data that may be seen by
 any ongoing active transaction, i.e. data that was committed before 
any active transaction was started.
 
    &lt;/li&gt;
&lt;li&gt; &lt;b&gt;Version Memory Consolidation&lt;/b&gt; moves the recent 
version of changed records from Transaction Version Memory to Persisted 
Segment based on Commit ID. It also clears outdated record versions from
 Transactional Version Memory. It can be considered as garbage collector
 for MVCC.
    
    &lt;/li&gt;
&lt;li&gt; &lt;b&gt;Segments&lt;/b&gt; contain the actual data (content of 
row-store tables) in pages. Row store tables are linked lists of memory 
pages. Pages are grouped in segments. Typical Page size is 16 KB.

    &lt;/li&gt;
&lt;li&gt; &lt;b&gt;Page Manager&lt;/b&gt; is responsible for Memory allocation. It also keeps track of free/used pages.
&lt;/li&gt;
&lt;/ul&gt;
&lt;/li&gt;
&lt;li&gt; The &lt;b&gt;Column Store&lt;/b&gt; is the SAP HANA database column-based in-memory relational data engine. Parts of it originate from &lt;b&gt;TREX&lt;/b&gt;
 (Text Retrieval and Extraction), i.e. SAP NetWeaver Search and 
Classification. For the SAP HANA database this proven technology was 
further developed into a full relational column-based data store. 
It provides efficient data compression, is optimized for high read 
performance, and is interfaced from the calculation / execution layer. 
Optimized read and write operations are possible because storage is 
separated into Main &amp;amp; Delta stores.   


&lt;a href=&quot;http://nonpng.dwbiconcepts.com/images/dbase/hana/Column-Store.jpg&quot;&gt;  
&lt;img alt=&quot;Column Store Block Diagram&quot; class=&quot;caption&quot; src=&quot;http://nonpng.dwbiconcepts.com/images/dbase/hana/Column-Store-s.jpg&quot; title=&quot;Column Store Block Diagram&quot; /&gt;
&lt;/a&gt;
&lt;br /&gt;


&lt;ul&gt;
&lt;li&gt; &lt;b&gt;Main Storage&lt;/b&gt; contains the compressed data in memory for fast read. 
    
    &lt;/li&gt;
&lt;li&gt; &lt;b&gt;Delta Storage&lt;/b&gt; is meant for fast write operation. The update is performed by inserting a new entry into the delta storage.
    
    &lt;/li&gt;
&lt;li&gt; &lt;b&gt;Delta Merge&lt;/b&gt; is an asynchronous process to move 
changes in delta storage into the compressed and read optimized main 
storage. Even during the merge operation the columnar table will still 
be available for read and write operations. To fulfil this 
requirement, a second delta and main storage are used internally (a one-line SQL sketch for requesting a merge appears after this list).   

    &lt;/li&gt;
&lt;li&gt; During &lt;b&gt;Read Operation&lt;/b&gt; data is always read from both
 main &amp;amp; delta storages and the result set is merged. The engine uses 
multi-version concurrency control (MVCC) to ensure consistent read operations.

    &lt;/li&gt;
&lt;li&gt; As row tables and columnar tables can be combined in one 
SQL statement, the corresponding engines must be able to consume 
intermediate results created by each other. A main difference between 
the two engines is the way they process data: Row store operators 
process data in a row-at-a-time fashion using iterators. Column store 
operations require that the entire column is available in contiguous 
memory locations. To exchange intermediate results, row store can 
provide results to column store materialized as complete rows in memory 
while column store can expose results using the iterator interface 
needed by row store.
&lt;/li&gt;
&lt;/ul&gt;
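For reference, a delta merge can also be requested manually in SQL; a minimal sketch, assuming a hypothetical column table named sales_facts:&lt;br /&gt;
&lt;pre style=&quot;font-family: monospace;&quot;&gt;-- sales_facts is a hypothetical column table; this asks the engine to run
-- the merge of its delta storage into main storage now rather than waiting.
MERGE DELTA OF sales_facts;&lt;/pre&gt;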
&lt;/li&gt;
&lt;li&gt; The &lt;b&gt;Persistence Layer&lt;/b&gt; is responsible for durability and
 atomicity of transactions. It ensures that the database is restored to 
the most recent committed state after a restart and that transactions 
are either completely executed or completely undone. To achieve this 
goal in an efficient way the persistence layer uses a combination of 
write-ahead logs, shadow paging and savepoints. The persistence layer 
offers interfaces for writing and reading data. It also contains SAP 
HANA&#39;s logger that manages the transaction log. Log entries can be 
written implicitly by the persistence layer when data is written via the
 persistence interface or explicitly by using a log interface.

&lt;/li&gt;
&lt;/ol&gt;
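As promised above, here is a minimal, hypothetical SQLScript sketch of the kind of set-oriented, data-intensive logic that can be pushed down into the database; the sales table, its columns and the procedure name are invented for illustration:&lt;br /&gt;
&lt;pre style=&quot;font-family: monospace;&quot;&gt;-- Hypothetical example: sales(customer_id NVARCHAR(10), amount DECIMAL(15,2))
CREATE PROCEDURE top_customers (
  OUT out_top TABLE (customer_id NVARCHAR(10), total_amount DECIMAL(15,2))
)
LANGUAGE SQLSCRIPT READS SQL DATA AS
BEGIN
  -- Table variables hold intermediate result sets; independent steps can be parallelized.
  sums = SELECT customer_id, SUM(amount) AS total_amount
           FROM sales
          GROUP BY customer_id;

  out_top = SELECT customer_id, total_amount
              FROM :sums
             ORDER BY total_amount DESC;
END;&lt;/pre&gt;
A client could then invoke it with CALL top_customers(?) and consume the table result directly, keeping the heavy lifting inside the database as described above.&lt;br /&gt;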
&lt;h3&gt;
Distributed System and High Availability&lt;/h3&gt;
The SAP HANA Appliance software supports High Availability. SAP HANA 
scales beyond one server and can remove single points of failure. A 
typical distributed scale-out cluster landscape has many server 
instances in a cluster; large tables can be distributed across multiple 
servers, and queries can be executed across servers. The SAP HANA 
distributed system also ensures transaction safety.


&lt;b&gt;Features&lt;/b&gt;
&lt;br /&gt;


&lt;ul&gt;
&lt;li&gt; N &lt;b&gt;Active&lt;/b&gt; Servers or &lt;b&gt;Worker&lt;/b&gt; hosts in the cluster.
&lt;/li&gt;
&lt;li&gt; M &lt;b&gt;Standby&lt;/b&gt; Server(s) in the cluster.
&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Shared file system&lt;/b&gt; for all Servers. Several instances of SAP HANA share the same metadata.
&lt;/li&gt;
&lt;li&gt; &lt;b&gt;Each&lt;/b&gt; Server hosts an &lt;b&gt;Index&lt;/b&gt; Server &amp;amp; &lt;b&gt;Name&lt;/b&gt; Server.
&lt;/li&gt;
&lt;li&gt; Only &lt;b&gt;one&lt;/b&gt; &lt;b&gt;Active&lt;/b&gt; Server hosts the &lt;b&gt;Statistics&lt;/b&gt; Server.
&lt;/li&gt;
&lt;li&gt; During startup one server gets elected as &lt;b&gt;Active Master&lt;/b&gt;. 
&lt;/li&gt;
&lt;li&gt; The Active Master assigns a volume to each starting Index Server or no volume in case of cold Standby Servers.
&lt;/li&gt;
&lt;li&gt; Up to &lt;b&gt;3&lt;/b&gt; Master Name Servers can be defined or configured.
&lt;/li&gt;
&lt;li&gt; Maximum of &lt;b&gt;16&lt;/b&gt; nodes is supported in High Availability configurations.
&lt;/li&gt;
&lt;/ul&gt;
&lt;br /&gt;
&lt;table align=&quot;center&quot; border=&quot;1&quot; cellpadding=&quot;10&quot; cellspacing=&quot;0&quot;&gt;
&lt;tbody&gt;
&lt;tr bgcolor=&quot;#C2DFFF&quot;&gt;
&lt;td&gt;Name Server Configured Role&lt;/td&gt;
&lt;td&gt;Name Server Actual Role&lt;/td&gt;
&lt;td&gt;Index Server Configured Role&lt;/td&gt;
&lt;td&gt;Index Server Actual Role&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Master 1&lt;/td&gt;
&lt;td&gt;Master&lt;/td&gt;
&lt;td&gt;Worker&lt;/td&gt;
&lt;td&gt;Master&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Master 2&lt;/td&gt;
&lt;td&gt;Slave&lt;/td&gt;
&lt;td&gt;Worker&lt;/td&gt;
&lt;td&gt;Slave&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Master 3&lt;/td&gt;
&lt;td&gt;Slave&lt;/td&gt;
&lt;td&gt;Worker&lt;/td&gt;
&lt;td&gt;Slave&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td&gt;Slave&lt;/td&gt;
&lt;td&gt;Slave&lt;/td&gt;
&lt;td&gt;Standby&lt;/td&gt;
&lt;td&gt;Standby&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;
&lt;/table&gt;
&lt;h3&gt;
Failover&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt; High Availability enables the failover of a node within one 
distributed SAP HANA appliance. Failover uses a cold Standby node and 
gets triggered automatically. So when an Active Server X fails, Standby 
Server N+1 reads the indexes from the shared storage and takes over the 
logical connection of the failed server X.

&lt;/li&gt;
&lt;li&gt; If the SAP HANA system detects a failover situation, the work 
of the services on the failed server is reassigned to the services 
running on the standby host. The failed volume and all the included 
tables are reassigned and loaded into memory in accordance with the 
failover strategy defined for the system. This reassignment can be 
performed without moving any data, because all the persistency of the 
servers is stored on a shared disk. Data and logs are stored on shared 
storage, where every server has access to the same disks.

&lt;/li&gt;
&lt;li&gt; The Master Name Server detects an Index Server failure and 
executes the failover. During the failover the Master Name Server 
assigns the volume of the failed Index Server to the cold Standby 
Server. In case of a Master Name Server failure, another of the 
remaining Name Servers will become Active Master.

&lt;/li&gt;
&lt;li&gt; Before a failover is performed, the system waits for a few 
seconds to determine whether the service can be restarted. A standby node 
can take over the role of a failing master or a failing slave node.&lt;/li&gt;
&lt;/ul&gt;
&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/5288812538589387735/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/05/sap-hana-architecture.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/5288812538589387735'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/5288812538589387735'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/05/sap-hana-architecture.html' title='SAP HANA Architecture'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-2766333810945805261</id><published>2012-05-29T21:55:00.001+05:30</published><updated>2012-05-29T21:55:09.756+05:30</updated><title type='text'>SAP HANA - An Introduction for the beginners</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
SAP HANA (High-Performance Analytic Appliance) is 
an in-memory database from SAP used to store data and analyze large volumes 
of non-aggregated transactional data in real time with unprecedented 
performance, which makes it ideal for decision support &amp;amp; predictive analysis.

 

&lt;br /&gt;
&lt;div&gt;
 
  
&lt;/div&gt;
The In-Memory Computing Engine is a next-generation innovation that uses 
cache-conscious data structures and algorithms, leveraging hardware 
innovation as well as SAP software technology innovations. It is ideal 
for real-time OLTP and OLAP in one appliance, i.e. an end-to-end solution from 
transactional processing to high-performance analytics. SAP HANA can also be used 
as a secondary database to accelerate analytics on existing 
applications.
&lt;br /&gt;


&lt;h3&gt;
Hardware Innovations - Leading to HANA&lt;/h3&gt;
In the real world we have a wide variety of data sources, e.g. unstructured 
data, operational data stores, data marts, data warehouses, online 
analytical stores, etc. To do analytics or information mining on this 
Big Data in real time, we come across hurdles such as latency, high cost 
and complexity. 
&lt;br /&gt;

Disk I/O was the performance bottleneck in the past, whereas in-memory 
computing was always much faster than that. Earlier, however, the 
cost of in-memory computing was prohibitive for any large-scale 
implementation. Now, with multi-core CPUs and high-capacity RAM, we can 
host the entire database in memory. So now the CPU is waiting for data to 
be loaded from main memory into the CPU cache - and that is the 
performance bottleneck today. 
&lt;br /&gt;

This is a total paradigm shift; &lt;i&gt;Tape is Dead, Disk is Tape, Main Memory is Disk &amp;amp; CPU Cache is Main Memory&lt;/i&gt;.
 HANA is optimized to exploit the parallel processing capabilities of 
modern multi-core/CPU architectures. With this architecture, SAP 
applications can benefit from current hardware technologies.
&lt;br /&gt;


&lt;h3&gt;
Memory Overview - Where we stand&lt;/h3&gt;
Let us have a quick look at multi-core CPU caches, main memory (RAM) 
&amp;amp; traditional hard disk with respect to response time.
&lt;br /&gt;

&lt;ul class=&quot;clipbox&quot;&gt;
&lt;li&gt; L1 cache - primary, per core, SRAM - fastest. L1 cache | ~ 1 ns | 64 KB
&lt;/li&gt;
&lt;li&gt; L2 cache - intermediate, per core, SRAM - slower than L1. L2 cache | ~ 5 ns | 256 KB
&lt;/li&gt;
&lt;li&gt; L3 cache - shared across all cores, SRAM - slowest of the caches. L3 cache | ~ 20 ns | 8 MB
&lt;/li&gt;
&lt;li&gt; Main memory (DRAM) | ~ 100 ns | TBs
&lt;/li&gt;
&lt;li&gt; Hard disk | &amp;gt; 1,000,000 ns | TBs
&lt;/li&gt;
&lt;/ul&gt;
&lt;h3&gt;
HANA Hardware Requirement&lt;/h3&gt;
HANA can be installed on hardware from several certified SAP hardware partners: Hewlett-Packard, IBM, Fujitsu, Cisco Systems and Dell. 
&lt;br /&gt;

&lt;div class=&quot;notebox&quot;&gt;
Currently SUSE Linux Enterprise Server x86-64 (SLES) 11 SP1 is the Operating System supported by SAP HANA. &lt;/div&gt;
A typical CPU and RAM configuration would be 4 Intel E7-4870 processors (40 cores) 
and 512 GB RAM. SAP recommends a dedicated 10 GBit/s network connection 
between the SAP HANA landscape and the source system for 
efficient data replication.
&lt;br /&gt;


&lt;h3&gt;
HANA Database Features&lt;/h3&gt;
Important database features of HANA include OLTP &amp;amp; OLAP 
capabilities, extreme performance, in-memory storage, massively parallel 
processing, a hybrid database with both column store and row store, complex event 
processing, a calculation engine, compression, virtual views, 
partitioning and no pre-calculated aggregates. The HANA in-memory architecture includes the 
In-Memory Computing Engine and the In-Memory Computing Studio for modeling 
and administration. The most important of these properties are explained in detail 
below, followed by the SAP HANA architecture.
&lt;br /&gt;


&lt;h3&gt;
Basic Concepts behind HANA Database&lt;/h3&gt;
&lt;h4&gt;
Extreme Hardware Innovations:&lt;/h4&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
Main memory is no longer a limited 
resource; modern servers can have 2 TB of system memory, which allows 
complete databases to be held in RAM. Current processors have up to 64 
cores, and 128 cores will soon be available. With the increasing number 
of cores, CPUs are able to process more data per time interval. 
This shifts the performance bottleneck from disk I/O to the data 
transfer between main memory and CPU cache.&lt;/div&gt;
&lt;h4&gt;
In-Memory Database:&lt;/h4&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
HANA fully leverages hardware 
innovations such as multi-core CPUs and high-capacity RAM. The 
basic concept is to cache the entire database in fast, directly accessible main 
memory close to the CPU for faster execution and to avoid disk I/O. Disk 
storage is still required for permanent persistency, since main memory is 
volatile. SAP HANA holds the bulk of its data in memory for maximum 
performance, but still uses persistent storage to provide a fallback in 
case of failure. Data and log are automatically saved to disk at regular 
savepoints, and the log is also saved to disk after each COMMIT of a 
database transaction. Disk write operations happen asynchronously, 
as a background task. Generally, on system start-up HANA loads the tables 
into memory.&lt;/div&gt;
&lt;h4&gt;
Massively Parallel Processing:&lt;/h4&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
With the availability of multi-core CPUs, 
higher CPU execution speeds can be achieved. Multiple cores call for new 
parallel algorithms to be used in databases in order to fully utilize 
the computing resources available. HANA&#39;s column-based storage makes it 
easy to execute operations in parallel using multiple processor cores. 
In a column store, data is already vertically partitioned. This means 
that operations on different columns can easily be processed in 
parallel. If multiple columns need to be searched or aggregated, each of 
these operations can be assigned to a different processor core. In 
addition, operations on one column can be parallelized by partitioning 
the column into multiple sections that can be processed by different 
processor cores. With the SAP HANA database, queries can be executed 
rapidly and in parallel, as sketched below.
&lt;/div&gt;
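&lt;div style=&quot;margin-left: 30px;&quot;&gt;
As a purely illustrative sketch (the table and column names below are made up 
and not part of any standard schema), a single query that aggregates 
several columns gives the engine natural units of work to spread across cores:&lt;/div&gt;
&lt;pre&gt;-- Each aggregate can be computed by a different core, and each column
-- can additionally be split into sections that are scanned in parallel.
SELECT   region,
         SUM(net_amount)   AS total_sales,
         AVG(discount_pct) AS avg_discount,
         COUNT(*)          AS order_count
FROM     sales
GROUP BY region;
&lt;/pre&gt;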
&lt;h4&gt;
Hybrid Data Store:&lt;/h4&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
Common databases store tabular data 
row-wise, i.e. all the data for a record is stored adjacent in 
memory; row-store tables are linked lists of memory pages. 
Conceptually, a database table is a two-dimensional data structure with 
cells organized in rows and columns. Computer memory, however, is 
organized as a linear structure. To store a table in linear memory, two 
options exist (a small worked example follows the list below):&lt;/div&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;

&lt;/div&gt;
&lt;ul&gt;
&lt;li&gt;A &lt;b&gt;row-oriented storage&lt;/b&gt; stores a table as a sequence of records, each of which contain the fields of one row. &lt;/li&gt;
&lt;li&gt;A &lt;b&gt;column-oriented storage&lt;/b&gt; stores all the values of a column in contiguous memory locations. &lt;/li&gt;
&lt;/ul&gt;
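&lt;div style=&quot;margin-left: 30px;&quot;&gt;
For example, a three-row table (the data is illustrative only) would be laid out 
in linear memory in the two following ways:&lt;/div&gt;
&lt;pre&gt;Table
ID  Name  Country
1   John  US
2   Ryan  UK
3   Lee   US

Row-oriented storage (record after record):
[1, John, US] [2, Ryan, UK] [3, Lee, US]

Column-oriented storage (column after column):
[1, 2, 3] [John, Ryan, Lee] [US, UK, US]
&lt;/pre&gt;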
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
Use of the column store helps to avoid 
scanning unnecessary columns when performing search and 
aggregation operations on single-column values stored in contiguous 
memory locations. Such an operation has high spatial locality and can 
be executed efficiently in the CPU cache. With row-oriented storage, the 
same operation would be much slower, because data of the same column is 
distributed across memory and the CPU is slowed down by cache misses. 
The column store is optimized for high read performance and 
efficient data compression. This combination of classical and 
innovative data storage and access technologies allows developers 
to choose the best technology for their application and, where 
necessary, use both in parallel.
&lt;/div&gt;
&lt;h4&gt;
OLTP and OLAP Database:&lt;/h4&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
HANA is a hybrid database with two relational engines: a 
read-optimised column store ideally suited for OLAP, and a write-optimised 
row store best suited for OLTP. Both stores are 
in-memory. Using column stores in OLTP applications requires a balanced 
approach to insertion and indexing of column data to minimize cache 
misses. The SAP HANA database allows the developer to specify whether a 
table is to be stored column-wise or row-wise. It is also possible to 
alter an existing table from columnar to row-based and vice versa.&lt;/div&gt;
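&lt;div style=&quot;margin-left: 30px;&quot;&gt;
As a rough sketch (the table and column names are made up, and the exact 
ALTER syntax should be verified against the SAP HANA SQL reference for your 
release), the choice of store is made at creation time and can be changed later:&lt;/div&gt;
&lt;pre&gt;-- Write-intensive OLTP table kept in the row store
CREATE ROW TABLE sales_order (
    order_id    INTEGER PRIMARY KEY,
    customer_id INTEGER,
    net_amount  DECIMAL(15,2)
);

-- Read- and aggregation-intensive table kept in the column store
CREATE COLUMN TABLE sales_history (
    order_id    INTEGER,
    customer_id INTEGER,
    net_amount  DECIMAL(15,2)
);

-- Convert an existing table from one store to the other
ALTER TABLE sales_order ALTER TYPE COLUMN;
&lt;/pre&gt;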
&lt;h4&gt;
 Higher Data Compression:&lt;/h4&gt;
&lt;div style=&quot;margin-left: 30px;&quot;&gt;
The goal of keeping all relevant data in 
main memory can be achieved at lower cost if data compression is used. 
Columnar data storage allows highly efficient compression. If a column 
is sorted, there will normally be several identical values placed 
adjacent to each other in memory. In this case compression methods such 
as run-length encoding, cluster coding or dictionary coding can be 
used. In column stores a compression factor of 10 can typically be 
achieved compared to traditional row-oriented storage systems.
&lt;/div&gt;
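&lt;div style=&quot;margin-left: 30px;&quot;&gt;
For example, a sorted Country column (the values are illustrative only) compresses 
very well with either of these schemes:&lt;/div&gt;
&lt;pre&gt;Sorted column values : DE DE UK UK UK US US US US
Run-length encoding  : (DE,2) (UK,3) (US,4)
Dictionary encoding  : dictionary {0=DE, 1=UK, 2=US}, encoded column 0 0 1 1 1 2 2 2 2
&lt;/pre&gt;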
&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/2766333810945805261/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/05/sap-hana-introduction-for-beginners.html#comment-form' title='2 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2766333810945805261'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/2766333810945805261'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/05/sap-hana-introduction-for-beginners.html' title='SAP HANA - An Introduction for the beginners'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>2</thr:total></entry><entry><id>tag:blogger.com,1999:blog-4025024079216879898.post-4758787421487973072</id><published>2012-05-29T21:54:00.000+05:30</published><updated>2012-05-29T21:54:00.301+05:30</updated><title type='text'>Building the Next Generation ETL data loading Framework</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;
Do you wish for an ETL framework that is highly customizable, 
lightweight and fits perfectly with all of your data loading needs? We 
do too! Let&#39;s build one together...&lt;br /&gt;

  
&lt;h3&gt;
What is an ETL framework?&lt;/h3&gt;
ETL, or &quot;Extraction, Transformation and Loading&quot;, is the prevalent 
technological paradigm for data integration. While ETL in itself is not a 
tool, ETL processes can be implemented through varied tools and 
programming methods. This includes, but is not limited to, tools like 
Informatica PowerCenter, DataStage, BusinessObjects Data Services 
(BODS), SQL Server Integration Services (SSIS), Ab Initio etc., and 
programming methods like PL/SQL (Oracle), T-SQL (Microsoft), UNIX shell 
scripting etc. Most of these tools and programming methodologies use a 
generic setup that controls, monitors, executes and logs the data flow 
throughout the ETL process. This generic &#39;setup&#39; is often referred to as an 
&#39;ETL framework&#39;.&lt;br /&gt;

As an example of an ETL framework, let&#39;s consider this. &quot;Harry&quot; needs 
to load 2 tables every day from one source system to some other target 
system. For this purpose, Harry has created 2 SQL jobs, each of which 
reads data from the source through some &quot;SELECT&quot; statements and writes the 
data into the target database using some &quot;INSERT&quot; statements. But in order 
to run these jobs, Harry needs a few more pieces of information - e.g. 
&lt;br /&gt;
&lt;ul&gt;
&lt;li&gt;When is a good time to execute these jobs? Can he schedule these jobs to run automatically every day?
&lt;/li&gt;
&lt;li&gt;Where is the source system located? (Connection information)&lt;/li&gt;
&lt;li&gt;What will happen if one of the jobs fails while loading the data? 
Will Harry get an alert message? Can he simply rerun the jobs after 
fixing the cause of the failure?&lt;/li&gt;
&lt;li&gt;How will Harry know whether any data was retrieved or loaded to the target at all?&lt;/li&gt;
&lt;/ul&gt;
It turns out that Harry needs something more. He needs some kind of setup 
that will govern the job execution regularly. This includes scheduling 
the jobs, executing the jobs, logging any failure/error information 
(and also alerting Harry about such failures), maintaining the 
connection information and even ensuring that Harry does not end up 
loading duplicate data.  
&lt;br /&gt;


Such a setup is called an &quot;ETL framework&quot;, and we are trying to build the perfect one here.&lt;br /&gt;
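At the database level, the heart of such a framework is usually a small control 
(or &quot;batch&quot;) table that records every run. The sketch below is purely 
illustrative - the table and column names are made up and not taken from any 
particular tool:&lt;br /&gt;
&lt;pre&gt;CREATE TABLE etl_batch (
    batch_id      INTEGER       PRIMARY KEY,
    job_name      VARCHAR(100)  NOT NULL,
    loaded_until  DATE,                      -- high-water mark for delta loads
    status        VARCHAR(20),               -- &#39;Running&#39;, &#39;Success&#39;, &#39;Failed&#39;
    start_time    TIMESTAMP,
    end_time      TIMESTAMP,
    rows_read     INTEGER,                   -- basis for reconciliation and logging
    rows_written  INTEGER,
    error_message VARCHAR(4000)              -- basis for failure notifications
);
&lt;/pre&gt;
The scheduler inserts a &#39;Running&#39; row before a job starts and updates it to 
&#39;Success&#39; or &#39;Failed&#39; at the end, which gives the framework restartability, 
metadata logging and a hook for notifications.&lt;br /&gt;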


&lt;h3&gt;
Critical Features of an ETL framework&lt;/h3&gt;
In a very broad sense, here are a few of the features that we feel are critical in any ETL framework:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;Support for Change Data Capture Or Delta Loading Or Incremental Loading&lt;/li&gt;
&lt;li&gt;Metadata logging&lt;/li&gt;
&lt;li&gt;Handling of multiple source formats&lt;/li&gt;
&lt;li&gt;Restartability support&lt;/li&gt;
&lt;li&gt;Notification support&lt;/li&gt;
&lt;li&gt;Highly configurable / customizable&lt;/li&gt;
&lt;/ul&gt;
&lt;h3&gt;
Good-to-have features of ETL Framework&lt;/h3&gt;
These are some good-to-have features for the framework:&lt;br /&gt;

&lt;ul&gt;
&lt;li&gt;Inbuilt data reconciliation&lt;/li&gt;
&lt;li&gt;Customizable log format&lt;/li&gt;
&lt;li&gt;Dry-load enabling&lt;/li&gt;
&lt;li&gt; Multiple notification formats&lt;/li&gt;
&lt;/ul&gt;
&lt;h3&gt;
Request for Proposal for the next-gen ETL framework&lt;/h3&gt;
&lt;h5&gt;
Based on the feature sets above, we are trying to build a generic 
framework that we would make available here for free for everyone&#39;s use.
 &lt;/h5&gt;
However, the list of features above is not complete. We are 
requesting our readers to send us an RFP for the proposed ETL framework 
that would resolve the shortcomings and issues in their existing 
frameworks. &lt;/div&gt;
In our previous article 
we discussed the concept of incremental loading in general. In 
this article we will see how to perform incremental loading for 
dimension tables.
&lt;br /&gt;


 

&lt;h3&gt;
Should we do incremental loading for dimensions?&lt;/h3&gt;
In a dimensional model, we may perform incremental loading for dimension tables as well. One may argue that this won&#39;t 
be necessary, as data volumes in dimension tables are not as high as the 
data volumes in the fact tables, hence we can simply do a full load 
every time.
&lt;br /&gt;



I personally do not agree with this argument. This is because during the 
last few years I have seen tremendous growth in the data in dimension 
tables, and things can get quite heavy, especially if we are trying to 
load SCD Type 2 dimensions. Anyway, without much ado, let&#39;s delve deep.
&lt;br /&gt;


&lt;h3&gt;
Standard Method of Loading&lt;/h3&gt;
Like before, for our purpose we will assume we have the below 
customer table in our source system, from which we need to perform the 
data loading:&lt;br /&gt;


&lt;pre&gt;CustomerID  CustomerName  Type         LastUpdatedDate
1           John          Individual   22-Mar-2012
2           Ryan          Individual   22-Mar-2012
3           Bakers&#39;       Corporate    23-Mar-2012
&lt;/pre&gt;
As discussed in the previous article, a typical SQL query to extract 
data incrementally from this source system will be like this:&lt;br /&gt;


&lt;pre&gt;SELECT t.* 
FROM Customer t
WHERE t.lastUpdatedDate &amp;gt; (select nvl(
                                 max(b.loaded_until), 
                                 to_date(&#39;01-01-1900&#39;, &#39;MM-DD-YYYY&#39;)
                                )
                      from batch b
                      where b.status = &#39;Success&#39;);
&lt;/pre&gt;
Here &quot;batch&quot; is a separate table which stores the date until which we have successfully extracted the data.&lt;br /&gt;



&lt;pre&gt;Batch_ID  Loaded_Until  Status
1         22-Mar-2012   Success
2         23-Mar-2012   Success
&lt;/pre&gt;
&lt;br /&gt;
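At the end of each successful run, the load process itself adds a 
row to this &quot;batch&quot; table to record the new high-water mark, so that the next run 
knows where to start. A minimal, purely illustrative statement could look like this:&lt;br /&gt;
&lt;pre&gt;-- Illustrative only: register the batch that has just finished successfully
INSERT INTO batch (batch_id, loaded_until, status)
VALUES (3, TO_DATE(&#39;24-03-2012&#39;, &#39;DD-MM-YYYY&#39;), &#39;Success&#39;);
&lt;/pre&gt;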

&lt;h3&gt;
Which one to use: &quot;Entry Date&quot; / &quot;Load Date&quot; or &quot;Last Update Date&quot;?&lt;/h3&gt;
In an incremental load methodology, we should extract a record when 
it is first created and, after that, whenever the record is updated. 
Therefore, we should always rely on the &quot;last update date&quot; column for 
extracting records. This is because the &quot;entry date&quot; or &quot;load date&quot; columns 
in the source systems are not enough to determine whether the record is 
updated at a later point in time. &lt;br /&gt;

&lt;div class=&quot;notebox&quot;&gt;
Often source systems maintain 2 different columns as 
load_date and last_update_date. When extracting data based on &quot;last 
update date&quot;, ensure that source systems always populate &quot;last updated 
date&quot; field with &quot;load date&quot; when the record is first created.
&lt;/div&gt;
&lt;h3&gt;
What are the benefits of incremental loading of dimension tables?&lt;/h3&gt;
Once we extract records incrementally based on their last update 
date, we can compare each record with the target based on its natural 
key and determine whether it is a new record or an updated record (see the sketch at the end of this article).&lt;br /&gt;

However, if we do not extract incrementally (and every time extract 
all the records from the source), then the number of records to compare 
against the target will be much higher, resulting in performance 
degradation. If we are doing incremental loading, records that do not 
have any change will not come through - only new or updated records will. 
But if we are doing a full load, everything will come through irrespective of any 
change.
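&lt;br /&gt;
The natural-key comparison described above can be expressed, for example, as an 
Oracle-style MERGE. The sketch below is illustrative only - the staging and 
dimension table names are made up, and a real SCD Type 2 dimension would need 
additional effective-date and surrogate-key handling:&lt;br /&gt;
&lt;pre&gt;MERGE INTO dim_customer d
USING stg_customer s
ON (d.customer_id = s.customer_id)          -- natural key comparison
WHEN MATCHED THEN
  UPDATE SET customer_name = s.customer_name,
             customer_type = s.customer_type
WHEN NOT MATCHED THEN
  INSERT (customer_id, customer_name, customer_type)
  VALUES (s.customer_id, s.customer_name, s.customer_type);
&lt;/pre&gt;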
&lt;/div&gt;</content><link rel='replies' type='application/atom+xml' href='http://dwhetltool.blogspot.com/feeds/6210085845906514068/comments/default' title='Post Comments'/><link rel='replies' type='text/html' href='http://dwhetltool.blogspot.com/2012/05/incremental-loading-for-dimension-table.html#comment-form' title='0 Comments'/><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/6210085845906514068'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/4025024079216879898/posts/default/6210085845906514068'/><link rel='alternate' type='text/html' href='http://dwhetltool.blogspot.com/2012/05/incremental-loading-for-dimension-table.html' title='Incremental Loading for Dimension Table'/><author><name>DWH</name><uri>http://www.blogger.com/profile/05065543203188335815</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><thr:total>0</thr:total></entry></feed>