<?xml version='1.0' encoding='UTF-8'?><rss xmlns:atom='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:georss='http://www.georss.org/georss' xmlns:gd='http://schemas.google.com/g/2005' xmlns:thr='http://purl.org/syndication/thread/1.0' version='2.0'><channel><atom:id>tag:blogger.com,1999:blog-4474430858172239827</atom:id><lastBuildDate>Fri, 23 Mar 2012 23:05:15 +0000</lastBuildDate><category>db2 partitioning</category><category>error handling in datastage</category><category>teradata</category><category>rolf</category><category>normal forms</category><category>sql in command stage</category><category>interview questions</category><category>Data distribution</category><category>citi bank</category><category>update table</category><category>funny telugu jokes</category><category>sort funnel</category><category>tera byte</category><category>datastage environment variables</category><category>3rd normal form</category><category>lmao</category><category>how to remove duplicates without using remove duplicates stage</category><category>oracle partitioning</category><category>difference between dataset and fileset</category><category>datastage faq's</category><category>datastage interview questions</category><category>raja scam</category><category>find invisible people in chat</category><category>datastage xml stages</category><category>symmetric multi processing</category><category>Teradata Permanent and Temporary Tables</category><category>warnings</category><category>performance tuning</category><category>facebook</category><category>datastage Fmt</category><category>car loan tricks</category><category>datastage performance tuning</category><category>configuration file</category><category>sql.create table</category><category>sybase partitioning</category><category>routing numbers</category><category>telugodaa majakka</category><category>error 
handling</category><category>lol</category><category>$PATH</category><category>fileset</category><category>sort stage</category><category>dataset</category><category>Data Access Methods</category><category>global temporary tables</category><category>config file datastage</category><category>referential integrity</category><category>get low intrest rates from dealer</category><category>invisible friends in yahoo</category><category>sequential file</category><category>Fileset vs dataset</category><category>massively parallel processing</category><category>path in linux</category><category>SMP and MPP</category><category>ls datastage</category><category>errors</category><category>optimization</category><category>aba</category><category>2g</category><category>funnel types</category><category>telangana</category><category>datastage errors</category><category>scam</category><category>byte</category><category>change mode</category><category>invisible friends in gtalk</category><category>lookup types</category><category>2g scam</category><category>apt_config_file</category><category>derived tables</category><category>search engine</category><category>yottabte</category><category>second normal form</category><category>rolfmao</category><category>xml output stage</category><category>large data</category><category>add blog to google</category><category>$PATH in unix</category><category>partitioning types</category><category>unix permissions</category><category>using sql in command stage</category><category>chmod</category><category>remove duplicates</category><category>sql server partitioning</category><category>path in unix</category><category>car</category><category>harish rao</category><category>hexa byte</category><category>find invisible user in gtalk</category><category>xml stage</category><category>Normalization</category><category>lookup stage</category><category>revoke</category><category>Difference between SMP and MPP</category><category>primary 
index</category><category>friend request</category><category>inida scam</category><category>continuous funnel</category><category>car loan tips</category><category>volatile tables</category><category>teradata partitioning</category><category>us</category><category>first normal form</category><category>index</category><category>datastage faq</category><category>Datastage transformations.datasatge Trim</category><category>find  invisible in yahoo</category><category>sql in job sequence</category><category>xml output</category><title>It's not a Blog and i'm not a Blogger</title><description></description><link>http://t-heartbeat.blogspot.com/</link><managingEditor>noreply@blogger.com (black-hawk)</managingEditor><generator>Blogger</generator><openSearch:totalResults>38</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-7438350504748455093</guid><pubDate>Fri, 09 Sep 2011 21:41:00 +0000</pubDate><atom:updated>2011-11-12T10:55:23.654-06:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>sql in command stage</category><category domain='http://www.blogger.com/atom/ns#'>sql in job sequence</category><category domain='http://www.blogger.com/atom/ns#'>datastage Fmt</category><category domain='http://www.blogger.com/atom/ns#'>using sql in command stage</category><category domain='http://www.blogger.com/atom/ns#'>Datastage transformations.datasatge Trim</category><category domain='http://www.blogger.com/atom/ns#'>ls datastage</category><title>Datastage Transformations</title><description>&lt;div dir="ltr" style="text-align: left;" trbidi="on"&gt;&lt;b&gt;Server Job Transformations:&lt;/b&gt;&lt;b&gt;&amp;nbsp;&lt;/b&gt;&lt;br /&gt;&lt;b&gt;1)To remove the zeroes from left&lt;/b&gt;&lt;br /&gt;function : Trim(columnName,'0','L')&lt;b&gt;&amp;nbsp;&lt;/b&gt;&lt;br /&gt;&lt;b&gt;2) To remove the zeroes from 
right&lt;/b&gt;&lt;br /&gt;function : Trim(columnName,'0','R')&lt;br /&gt;&lt;b&gt;3)Create spaces or create zeroes(padding) before or after the Input value&lt;/b&gt;&lt;br /&gt;Fmt(KeyMgtGetNextValue (DSJobStartTime),"8'0'R")&lt;br /&gt;Example :&lt;br /&gt;If Input : "1234"&lt;br /&gt;desired output : "00001234"&lt;br /&gt;If i/o : "123"&lt;br /&gt;o/p will be:"00000123"&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Parallel Job Transformations :&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;br /&gt;&lt;u&gt;&lt;b&gt;Sequence Job Transformations:&lt;/b&gt;&lt;/u&gt;&lt;br /&gt;&lt;b&gt;Command Stage :&amp;nbsp;&amp;nbsp;&lt;/b&gt;&lt;br /&gt;&lt;b&gt;List out the files with comma(,)&lt;/b&gt;&lt;br /&gt;ls -m #$FilePath##FolderPath# |tr '\n' ','| sed '$s/,$//'&lt;b&gt;&lt;/b&gt;&lt;br /&gt;&lt;b&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;b&gt;Use Sql in Command Stage :&amp;nbsp;&lt;/b&gt;&lt;br /&gt;&lt;b&gt;Ex :&amp;nbsp;&lt;/b&gt;&lt;br /&gt;command:&lt;br /&gt;nzsql (Netezza)&lt;br /&gt;&lt;br /&gt;Parameter:&lt;br /&gt;&lt;br /&gt;-host #$jpNZ_SERVER# -db #$jpNZ_DB_DB# -u #$jpNZ_USERID_DB# -pw #$jpNZ_PWD_DB# -t&amp;nbsp; -c "SELECT STRT_TMSTMP FROM #$jpNZ_DB_DB#.#$jpNZ_&lt;br /&gt;&lt;div dir="ltr"&gt;&lt;wbr&gt;&lt;/wbr&gt;TABLE_OWNER#.EXTRACT_&lt;wbr&gt;&lt;/wbr&gt;DRIVER WHERE PROC_NM='XYZ' " &lt;/div&gt;&lt;br /&gt;&lt;br /&gt;&lt;/div&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-7438350504748455093?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/09/datastage-transformations.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-6488692294601307137</guid><pubDate>Wed, 31 Aug 2011 16:35:00 +0000</pubDate><atom:updated>2011-08-31T11:35:15.618-05:00</atom:updated><category 
domain='http://www.blogger.com/atom/ns#'>get low intrest rates from dealer</category><category domain='http://www.blogger.com/atom/ns#'>car</category><category domain='http://www.blogger.com/atom/ns#'>car loan tricks</category><category domain='http://www.blogger.com/atom/ns#'>car loan tips</category><title>Car Loan Tips</title><description>&lt;b&gt;Run Your Credit Report&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&gt;Before embarking in your car buying journey, request your credit report from the three credit bureaus.  You can request your credit report for free once a year by visiting annualcreditreport.com or by calling 1-877-322-8228.  Your credit report will give you a glimpse of your creditworthiness and inform you of any possible shortcomings.  Knowing of all this before stepping into a dealership will guard you from the most aggressive selling tactics and help you walk away when the financing offered is not in your best interest.&lt;br /&gt;Car Loan Warning 	&lt;br /&gt;&lt;br /&gt;!&gt;Be careful to avoid paid credit reporting services.  Only annualcreditreport.com is authorized to request a free credit report for you under the law. Paid credit reporting services often carry hidden fees and undisclosed costs.&lt;br /&gt;----&lt;br /&gt;&lt;b&gt;Visit Your Nearest Bank or Credit Union To Get A Quote&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&gt;Once you have your credit report handy and a have a good idea of what type of car and price range you are looking for, head over to your nearest bank or credit union to see what kind of interest rates they are offering on their car loans. In some cases, particularly if you already know exactly what vehicle you want to purchase, the bank or credit union may pre-approve you, thus letting you know exactly what interest rate and monthly payments you should expect in your car loan.&lt;br /&gt;Car Loan Warning 	&lt;br /&gt;&lt;br /&gt;Be sure to shop around and to compare rates. 
Visit more than one financial institution to get a quote and to find out what interest rates they are offering on their loans. This will give you a better idea if you are getting a good deal or not.&lt;br /&gt;&lt;br /&gt;-----&lt;br /&gt;&lt;b&gt;Negotiate for a Better Rate&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&gt;Despite the loans offered directly by banks and credit unions, eight out of every 10 car buyers finance their vehicle through a car dealer. Whether it is the convenience offered or simply the marketing tactics deployed, if you find yourself behind closed-doors in the finance and insurance department of a car dealer be ready to negotiate for the lowest interest rate possible without feeling intimidated. Knowing your credit history and the loan rates offered directly from banks and credit unions in your area will definitely give you the upper-hand in getting the best car loan possible, but remain weary of any interest rate markups added on by the dealer. While a car dealer may initially originate your loan, it often attempts to sell the loan to a third-party lender for a profit. This profit is made by arbitrarily raising the interest rate of your car loan. If the interest rate offered by the dealer is higher than what you anticipated, just ask for the desired interest rate and renegotiate.&lt;br /&gt;Car Loan Warning 	&lt;br /&gt;&lt;br /&gt;&gt;Try to avoid any add-on products offered by the dealer. Products such as vehicle service contracts, guaranteed auto protection insurance, credit life and disability insurance, and many others are often overpriced and unnecessary. Car dealers often sell these products to raise the cost of their loans and increase their profit margins. 
If you really need any of those add-on products, try to purchase them outside the dealership for much cheaper.&lt;br /&gt;---&lt;br /&gt;&lt;b&gt;Other Things to Consider:&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Comparison Shop Online: The internet has made it a lot easier for consumers to compare car prices and loan rates online. Start your research there before you head out to the dealership.&lt;br /&gt;&lt;br /&gt;“Yo-Yo” scams: “Yo-yo” scams or “spot deliveries” occur when a car buyer drives away with the vehicle without finalizing sale. Once home, the dealer will call back the buyer claiming that it was unable to fund the loan at the agreed-upon terms. The buyer must then return the car to the dealer and often renegotiate the loan at a higher interest rate than one agreed-upon before.&lt;br /&gt;&lt;br /&gt;“Buy Here and Pay Here” Dealers: “Buy Here Pay Here” dealerships typically finance used auto loans in-house to borrowers with no or poor credit. The average APR is usually much higher than a bank or credit union loan. The car loans made by these dealers are often unsustainable and lead to a high rate of repossessions.&lt;br /&gt;&lt;br /&gt;Take Your Time: The average consumer spends 45 minutes with the finance and insurance department at the dealer (only 27 minutes if they take a test drive), so take your time to consider your lending options and don’t feel pressured to sign the dotted line. You have the right to take the entire paperwork home before agreeing to the loan.&lt;br /&gt;&lt;br /&gt;Don’t Get Caught In The Monthly Payment Trap: Dealers will often attempt to mask the true cost of their loans by focusing on the monthly payments. 
Be sure to compare the total cost of all the loans offered and to choose the one that is less costly to you in the long run.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-6488692294601307137?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/08/car-loan-tips.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-1948486233468728748</guid><pubDate>Tue, 16 Aug 2011 18:28:00 +0000</pubDate><atom:updated>2011-08-16T13:28:54.619-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>datastage xml stages</category><category domain='http://www.blogger.com/atom/ns#'>xml stage</category><category domain='http://www.blogger.com/atom/ns#'>xml output</category><category domain='http://www.blogger.com/atom/ns#'>xml output stage</category><title>Generating output files for multiple columns</title><description>When two or more output columns have XPath expressions, XML Output generates a file for each column. You must add a column index flag to the root filename to prevent overwriting. This creates a naming pattern. &lt;br /&gt;&lt;br /&gt;Valid flags include:&lt;br /&gt; %% Column position, starting with zero (0) &lt;br /&gt; %@ Column names&lt;br /&gt;  You can add these flags before, within, or after the root filename. &lt;br /&gt;Examples The first output column is CUSTOMERS. The second output column is DIVISIONS. &lt;br /&gt;&lt;b&gt;1. The naming pattern is acme%%.xml. XML Output generates two files, called acme0.xml and acme1.xml.&lt;br /&gt;2. The naming pattern is acme%@.xml. 
XML Output generates two files, called acmeCUSTOMERS.xml and acmeDIVISIONS.xml.&lt;/b&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-1948486233468728748?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/08/generating-output-files-for-multiple.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-5059215843506776861</guid><pubDate>Mon, 08 Aug 2011 16:06:00 +0000</pubDate><atom:updated>2011-08-08T11:06:44.229-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>apt_config_file</category><category domain='http://www.blogger.com/atom/ns#'>configuration file</category><category domain='http://www.blogger.com/atom/ns#'>config file datastage</category><title>APT_CONFIG_FILE : CONFIGURATION FILE</title><description>1)&lt;b&gt;APT_CONFIG_FILE&lt;/b&gt; is the file using which DataStage determines the configuration file (one can have many configuration files for a project) to be used. In fact, this is what is generally used in production. However, if this environment variable is not defined then how DataStage determines which file to use?&lt;br /&gt;&lt;br /&gt;    1)If the APT_CONFIG_FILE environment variable is not defined then DataStage look for default configuration file (config.apt) in following path:&lt;br /&gt;        1)Current working directory.&lt;br /&gt;        2)INSTALL_DIR/etc, where INSTALL_DIR ($APT_ORCHHOME) is the top level directory of DataStage installation.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;2)What are the different options a logical node can have in the configuration file?&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;    1.fastname – The fastname is the physical node name that stages use to open connections for high volume data transfers. 
The attribute of this option is often the network name. Typically, you can get this name by using Unix command ‘uname -n’.&lt;br /&gt;    2.pools – Name of the pools to which the node is assigned to. Based on the characteristics of the processing nodes you can group nodes into set of pools.&lt;br /&gt;        1.A pool can be associated with many nodes and a node can be part of many pools.&lt;br /&gt;        2.A node belongs to the default pool unless you explicitly specify apools list for it, and omit the default pool name (“”) from the list.&lt;br /&gt;        3.A parallel job or specific stage in the parallel job can be constrained to run on a pool (set of processing nodes).&lt;br /&gt;            1.In case job as well as stage within the job are constrained to run on specific processing nodes then stage will run on the node which is common to stage as well as job.&lt;br /&gt;    3.resource – resource resource_type “location” [{pools “disk_pool_name”}]  | resource resource_type “value” . resource_type can be canonicalhostname (Which takes quoted ethernet name of a node in cluster that is unconnected to Conductor node by the hight speed network.) or disk (To read/write persistent data to this directory.) or scratchdisk (Quoted absolute path name of a directory on a file system where intermediate data will be temporarily stored. It is local to the processing node.) or RDBMS Specific resourses (e.g. DB2, INFORMIX, ORACLE, etc.)&lt;br /&gt;&lt;br /&gt;&lt;b&gt;3)How datastage decides on which processing node a stage should be run?&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;    1.If a job or stage is not constrained to run on specific nodes then parallel engine executes a parallel stage on all nodes defined in the default node pool. (Default Behavior)&lt;br /&gt;    2.If the node is constrained then the constrained processing nodes are choosen while executing the parallel stage. 
(Refer to 2.2.3 for more detail).&lt;br /&gt;&lt;br /&gt;4)When configuring an MPP, you specify the physical nodes in your system on which the parallel engine will run your parallel jobs. This is called Conductor Node. For other nodes, you do not need to specify the physical node.  Also, You need to copy the (.apt) configuration file only to the nodes from which you start parallel engine applications. It is possible that conductor node is not connected with the high-speed network switches. However, the other nodes are connected to each other using a very high-speed network switches. How do you configure your system so that you will be able to achieve optimized parallelism?&lt;br /&gt;&lt;br /&gt;    1.Make sure that none of the stages are specified to be run on the conductor node.&lt;br /&gt;    2.Use conductor node just to start the execution of parallel job.&lt;br /&gt;    3.Make sure that conductor node is not the part of the default pool. &lt;br /&gt;&lt;b&gt;&lt;br /&gt;5)Although, parallelization increases the throughput and speed of the process, why maximum parallelization is not necessarily the optimal parallelization?&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;    Datastage creates one process for every stage for each processing node.  Hence, if the hardware resource is not available to support the maximum parallelization, the performance of overall system goes down. For example, suppose we have a SMP system with three CPU and a Parallel job with 4 stage. We have 3 logical node (one corresponding to each physical node (say CPU)). Now DataStage will start 3*4 = 12 processes, which has to be managed by a single operating system. Significant time will be spent in switching context and scheduling the process.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;6)Since we can have different logical processing nodes, it is possible that some node will be more suitable for some stage while other nodes will be more suitable for other stages. 
So, when to decide which node will be suitable for which stage?&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;    1.If a stage is performing a memory intensive task then it should be run on a node which has more disk space available for it. E.g. sorting a data is memory intensive task and it should be run on such nodes.&lt;br /&gt;    2.If some stage depends on licensed version of software (e.g. SAS Stage, RDBMS related stages, etc.) then you need to associate those stages with the processing node, which is physically mapped to the machine on which the licensed software is installed. (Assumption: The machine on which licensed software is installed is connected through other machines using high speed network.)&lt;br /&gt;    3.If a job contains stages, which exchange large amounts of data then they should be assigned to nodes where stages communicate by either shared memory (SMP) or high-speed link (MPP) in most optimized manner. &lt;br /&gt;&lt;br /&gt;&lt;b&gt;7)Basically nodes are nothing but set of machines (specially in MPP systems). You start the execution of parallel jobs from the conductor node. Conductor nodes creates a shell of remote machines (depending on the processing nodes) and copies the same environment on them. However, it is possible to create a startup script which will selectively change the environment on a specific node. This script has a default name of startup.apt. However, like main configuration file, we can also have many startup configuration files. The appropriate configuration file can be picked up using the environment variable APT_STARTUP_SCRIPT. 
What is use of APT_NO_STARTUP_SCRIPT environment variable?&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;    1.Using APT_NO_STARTUP_SCRIPT environment variable, you can instruct Parallel engine not to run the startup script on the remote shell.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;8)What are the generic things one must follow while creating a configuration file so that optimal parallelization can be achieved?&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;    1.Consider avoiding the disk/disks that your input files reside on.&lt;br /&gt;    2.Ensure that the different file systems mentioned as the disk and scratchdisk resources hit disjoint sets of spindles even if they’re located on a RAID (Redundant Array of Inexpensive Disks) system.&lt;br /&gt;    3.Know what is real and what is NFS:&lt;br /&gt;        1.Real disks are directly attached, or are reachable over a SAN (storage-area network -dedicated, just for storage, low-level protocols).&lt;br /&gt;        2.Never use NFS file systems for scratchdisk resources, remember scratchdisk are also used for temporary storage of file/data during processing.&lt;br /&gt;        3.If you use NFS file system space for disk resources, then you need to know what you are doing. For example, your final result files may need to be written out onto the NFS disk area, but that doesn’t mean the intermediate data sets created and used temporarily in a multi-job sequence should use this NFS disk area. Better to setup a “final” disk pool, and constrain the result sequential file or data set to reside there, but let intermediate storage go to local or SAN resources, not NFS.&lt;br /&gt;   4.Know what data points are striped (RAID) and which are not. 
Where possible, avoid striping across data points that are already striped at the spindle level.&lt;br /&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-5059215843506776861?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/08/aptconfigfile-configuration-file.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-5724857765095367222</guid><pubDate>Fri, 24 Jun 2011 20:25:00 +0000</pubDate><atom:updated>2011-06-24T15:26:10.065-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>$PATH in unix</category><category domain='http://www.blogger.com/atom/ns#'>path in unix</category><category domain='http://www.blogger.com/atom/ns#'>$PATH</category><category domain='http://www.blogger.com/atom/ns#'>path in linux</category><title>The PATH Environment Variable in UNIX</title><description>The PATH environment variable has a special format. Let's see what it looks like:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ echo $PATH&lt;br /&gt;/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:.&lt;br /&gt;&lt;br /&gt;It's essentially a :-separated list of directories. When you execute a command, the shell searches through each of these directories, one by one, until it finds a directory where the executable exists. Remember that we found ls in /bin, right? /bin is the second item in the PATH variable. So let's remove /bin from PATH. 
We can do this by using the export command:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ export PATH=/usr/local/bin:/usr/bin:/sbin:/usr/sbin:.&lt;br /&gt;&lt;br /&gt;Make sure that the variable is set correctly:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ echo $PATH&lt;br /&gt;/usr/local/bin:/usr/bin:/sbin:/usr/sbin:.&lt;br /&gt;&lt;br /&gt;Now, if we try to run ls, the shell no longer knows to look in /bin!&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ ls&lt;br /&gt;-bash: ls: command not found&lt;br /&gt;&lt;br /&gt;As expected, ls can no longer be found. Let's add /bin back to PATH, as ls is a very useful thing to have.&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ export PATH=/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Adding to PATH&lt;/b&gt;&lt;br /&gt;There are many times where you'll want to append an item to PATH. First, let's see what the current PATH is:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ echo $PATH&lt;br /&gt;/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:.&lt;br /&gt;&lt;br /&gt;The way to add a directory is as follows:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ export PATH=$PATH:/new/path&lt;br /&gt;&lt;br /&gt;This command adds /new/path to PATH. Let's see if it got updated:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ echo $PATH&lt;br /&gt;/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:.:/new/path&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Making this happen every time you login&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;There's a special file in your home directory called .bashrc In UNIX, a convention is that files beginning with . are configuration files, and thus should be hidden from view. ls will only list files beginning with a . if passed the -a flag. e.g.&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ ls -a&lt;br /&gt;&lt;br /&gt;At any rate, this file (.bashrc), simply contains a list of commands. 
Each one of these commands gets executed every time you create a new shell.&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ cat .bashrc&lt;br /&gt;export PATH="$PATH:/p/firefox/bin"&lt;br /&gt;..&lt;br /&gt;&lt;br /&gt;Every time a shell is started, /p/firefox/bin is added to PATH. If you wish to have certain directories automatically added to PATH, simply place those commands at the end of this file. Log out and log back in to view the changes. Alternatively, you can load the contents of that file in the current session:&lt;br /&gt;&lt;br /&gt;dbettis@rhino[~]$ . .bashrc&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-5724857765095367222?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/path-environment-variable.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-1062830460612888677</guid><pubDate>Fri, 24 Jun 2011 02:37:00 +0000</pubDate><atom:updated>2011-06-23T21:37:25.997-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>chmod</category><category domain='http://www.blogger.com/atom/ns#'>unix permissions</category><category domain='http://www.blogger.com/atom/ns#'>change mode</category><title>Understanding UNIX permissions and chmod</title><description>&lt;b&gt;Introduction&lt;/b&gt;&lt;br /&gt;This is a topic that has been beaten to death both in books and on-line. For some reason, it seems that it is one of the most common misunderstandings that people have to face when learning how to write and/or configure their first cgi programs. This tutorial aims to clarify the concepts involved. Note that we will be referring to UNIX in a generic sense in this article. Most of what we are going to discuss here applies to all UNIX flavours. (such as Linux, SVR4, BSD etc.) 
It is also a good idea to type man chmod to check for the specific details on your system, too.&lt;br /&gt;&lt;b&gt;Users&lt;/b&gt;&lt;br /&gt;A UNIX system serves many users. Users are an abstraction that denotes a logical entity for assignment of ownership and operation privileges over the system. A user may correspond to a real-world person, but also a type of system operation. So, in my system, I have user 'nick' that corresponds to me, but I also have user 'www' which corresponds to the privileges necessary to operate the local webserver. UNIX doesn't care about what the user means for me. It just knows what belongs to any given user and what each user is allowed to do with any given thing (file, program, device, etc) on the system. UNIX identifies each user by a User ID (UID) and the username (or login) such as 'nick' and 'www' is just an alias to the UID that makes humans more comfortable.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Groups&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Users can be organized in groups. A user may belong to one or more groups of users. The concept of groups serves the purpose of assigning sets of privileges for a given resource and sharing them among many users that need to have them. (perhaps because they are all members of a project working team and they all need access to some common project files) So, on my system user 'nick' and user 'www' both belong to the group 'perlfect'. This way, they can have some shared privileges over the files for this site. User 'nick' needs them to edit the site, and user 'www' needs them to manage the webserver that will be publishing the site.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Ownership&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Every file in UNIX has an owner user and an owner group. So, for any file in the system, user 'nick' may have one of the following ownership relations:&lt;br /&gt;&lt;br /&gt;    nick owns the file, i.e. the file's owner is 'nick'.&lt;br /&gt;    nick is a member of the group that owns the file, i.e. 
the file's owner group is 'perlfect'.&lt;br /&gt;    nick is neither the owner, nor belonging to the group that owns the file&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Permissions&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Every file on the system has associated with it a set of permissions. Permissions tell UNIX what can be done with that file and by whom. There are three things you can (or can't) do with a given file:&lt;br /&gt;&lt;br /&gt;    read it,&lt;br /&gt;    write (modify) it and&lt;br /&gt;    execute it.&lt;br /&gt;&lt;br /&gt;Unix permissions specify which of the above operations can be performed for any ownership relation with respect to the file. In simpler terms, what can the owner do, what can the owner group do, and what can everybody else do with the file. For any given ownership relation, we need three bits to specify access permissions: the first to denote read (r) access, the second to denote (w) access and the third to denote execute (x) access. We have three ownership relations: 'owner', 'group' and 'all' so we need a triplet for each, resulting in nine bits. Each bit can be set or clear. (not set) We mark a set bit by it's corresponding operation letter (r, w or x) and a clear bit by a dash (-) and put them all on a row. An example might be rwxr-xr-x.What this means is that the owner can do anything with the file, but group owners and the rest of the world can only read or execute it. Usually in UNIX there is also another bit that precedes this 9-bit pattern. You do not need to know about it, at least for the time being.&lt;br /&gt;&lt;br /&gt;So if you try ls -l on the command prompt you will get something like the following: [nick@thekla src]$ ls -l -rwxr-xr-x 1 nick users 382 Jan 19 11:49 bscoped.pl drwxr-xr-x 3 nick users 1024 Jan 19 11:19 lib/ -rwxr-xr-x 1 nick users 1874 Jan 19 10:23 socktest.pl&lt;br /&gt;&lt;br /&gt;The first column here shows the permission bit pattern for each file. 
The third column shows the owner, and the fourth column shows the owner group. By this time, the information provided by ls -l should be enough for you to figure out what each user of the system can do with any of the files in the directory.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Directories&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Another interesting thing to note is that lib/ which is a directory has permissions, too. Permissions take a different meaning for directories. Here's what they mean:&lt;br /&gt;&lt;br /&gt;   1) read determines if a user can view the directory's contents, i.e. do ls in it.&lt;br /&gt;   2) write determines if a user can create new files or delete files in the directory. (Note here that this essentially means that a user with write access to a directory can delete files in the directory even if he/she doesn't have write permissions for the file! So be careful with this.)&lt;br /&gt;   3) execute determines if the user can cd into the directory.&lt;br /&gt;&lt;b&gt;&lt;br /&gt;chmod&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;To set/modify a file's permissions you need to use the chmod program. Of course, only the owner of a file may use chmod to alter a file's permissions. chmod has the following syntax: chmod [options] mode file(s)&lt;br /&gt;&lt;br /&gt;The 'mode' part specifies the new permissions for the file(s) that follow as arguments. A mode specifies which user's permissions should be changed, and afterwards which access types should be changed. Let's say for example: chmod a-x socktest.pl This means that the execute bit should be cleared (-) for all users. 
(owner, group and the rest of the world) The permissions start with a letter specifying what users should be affected by the change, this might be any of the following:&lt;br /&gt;&lt;br /&gt; &lt;b&gt;   u the owner user&lt;br /&gt;    g the owner group&lt;br /&gt;    o others (neither u, nor g)&lt;br /&gt;    a all users&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;This is followed by a change instruction which consists of a +(set bit) or -(clear bit) and the letter corresponding to the bit that should be changed.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Let's see some examples: &lt;/b&gt;&lt;br /&gt;$ ls -l socktest.pl &lt;br /&gt;-rwxr-xr-x 1 nick users 1874 Jan 19 10:23 socktest.pl*                             $ chmod a-x socktest.pl &lt;br /&gt;$ ls -l socktest.pl -rw-r--r-- 1 nick users 1874 Jan 19 10:23 socktest.pl          $ chmod g+w socktest.pl $ ls -l socktest.pl -rw-rw-r-- 1 nick users 1874 Jan 19 10:23 socktest.pl  &lt;br /&gt;$ chmod ug+x socktest.pl &lt;br /&gt;$ ls -l socktest.pl -rwxrwxr-- 1 nick users 1874 Jan 19 10:23 socktest.pl* &lt;br /&gt;$ chmod ug-wx socktest.pl $ ls -l socktest.pl -r--r--r-- 1 nick users 1874 Jan 19 10:23 socktest.pl&lt;br /&gt;&lt;br /&gt;Strange numbers...&lt;br /&gt;&lt;br /&gt;You might have encountered things like chmod 755 somefile and of course you will be wondering what this is. The thing is, that you can change the entire permission pattern of a file in one go using one number like the one in this example. Every mode has a corresponding code number, and as we shall see there is a very simple way to figure out what number corresponds to any mode.&lt;br /&gt;&lt;br /&gt;Every one of the three digits on the mode number corresponds to one of the three permission triplets. (u, g and o) Every permission bit in a triplet corresponds to a value: 4 for r, 2 for w, 1 for x. If the permission bit is set, you add this value to the number of the permission triplet. If it is cleared, then you add nothing. 
(Some of you might notice that in fact, the number for a triplet is the octal value corresponding to the three-bit pattern - if you don't know what an octal value is, it doesn't really matter, just follow the instructions) So if a file has rwxr-xr-x permissions we do the following calculation:&lt;br /&gt;&lt;br /&gt;Triplet for u: rwx =&gt; 4 + 2 + 1 = 7&lt;br /&gt;Triplet for g: r-x =&gt; 4 + 0 + 1 = 5&lt;br /&gt;Triplet for o: r-x =&gt; 4 + 0 + 1 = 5&lt;br /&gt;Which makes: 755&lt;br /&gt;&lt;br /&gt;So, 755 is a terse way to say 'I don't mind if other people read or run this file, but only I should be able to modify it' and 777 means 'everyone has full access to this file'&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-1062830460612888677?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/understanding-unix-permissions-and.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-7923450282148409312</guid><pubDate>Thu, 23 Jun 2011 17:00:00 +0000</pubDate><atom:updated>2011-06-24T17:17:02.709-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>sql server partitioning</category><category domain='http://www.blogger.com/atom/ns#'>teradata partitioning</category><category domain='http://www.blogger.com/atom/ns#'>oracle partitioning</category><category domain='http://www.blogger.com/atom/ns#'>partitioning types</category><category domain='http://www.blogger.com/atom/ns#'>db2 partitioning</category><category domain='http://www.blogger.com/atom/ns#'>sybase partitioning</category><title>Databases and partitioning types</title><description>&lt;b&gt;ORACLE:&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Basics of Partitioning&lt;/b&gt;&lt;br /&gt;Partitioning allows a table, index or 
index-organized table to be subdivided into&lt;br /&gt;smaller pieces. Each piece of the database object is called a partition. Each&lt;br /&gt;partition has its own name, and may optionally have its own storage characteristics. From the perspective of a database administrator, a partitioned&lt;br /&gt;object has multiple pieces that can be managed either collectively or individually.&lt;br /&gt;This gives the administrator considerable flexibility in managing partitioned&lt;br /&gt;&lt;div class="separator" style="clear: both; text-align: center;"&gt;&lt;a href="http://2.bp.blogspot.com/-kg8R2RQV2w4/TgUK2n0MYtI/AAAAAAAAHWA/FcXgrSgbyfc/s1600/oracle_parti.bmp" imageanchor="1" style="margin-left:1em; margin-right:1em"&gt;&lt;img border="0" height="200" width="320" src="http://2.bp.blogspot.com/-kg8R2RQV2w4/TgUK2n0MYtI/AAAAAAAAHWA/FcXgrSgbyfc/s320/oracle_parti.bmp" /&gt;&lt;/a&gt;&lt;/div&gt;&lt;br /&gt;&lt;br /&gt;object. However, from the perspective of the application, a partitioned table is&lt;br /&gt;identical to a non-partitioned table; no modifications are necessary when&lt;br /&gt;accessing a partitioned table using SQL DML commands.&lt;br /&gt;Figure 1: Application and DBA perspective of a partitioned table&lt;br /&gt;Database objects - tables, indexes, and index-organized tables - are partitioned&lt;br /&gt;using a 'partitioning key', a set of columns which determine in which partition a&lt;br /&gt;given row will reside. For example the sales table shown in figure 1 is rangepartitioned&lt;br /&gt;on sales date, using a monthly partitioning strategy; the table appears&lt;br /&gt;to any application as a single, 'normal' table. 
However, the DBA can manage and&lt;br /&gt;store each monthly partition individually, potentially using different storage tiers,&lt;br /&gt;applying table compression to the older data, or store complete ranges of older&lt;br /&gt;data in read only tablespaces.&lt;br /&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Basic Partitioning Strategies&lt;/b&gt;&lt;br /&gt;Oracle Partitioning offers three fundamental data distribution methods that control&lt;br /&gt;how the data is actually going to placed into the various individual partitions,&lt;br /&gt;namely:&lt;br /&gt;&lt;b&gt;Range:&lt;/b&gt; The data is distributed based on a range of values of the&lt;br /&gt;partitioning key (for a date column as the partitioning key, the 'January-&lt;br /&gt;2007' partition contains rows with the partitioning-key values between&lt;br /&gt;'01-JAN-2007' and '31-JAN-2007'). The data distribution is a continuum&lt;br /&gt;without any holes and the lower boundary of a range is automatically&lt;br /&gt;defined by the upper boundary of the preceding range.&lt;br /&gt;&lt;b&gt;List:&lt;/b&gt; The data distribution is defined by a list of values of the partitioning&lt;br /&gt;key (for a region column as the partitioning key, the 'North America'&lt;br /&gt;partition may contain values 'Canada', 'USA', and 'Mexico'). A special&lt;br /&gt;'DEFAULT' partition can be defined to catch all values for a partition key&lt;br /&gt;that are not explicitly defined by any of the lists.&lt;br /&gt;&lt;b&gt;Hash:&lt;/b&gt; A hash algorithm is applied to the partitioning key to determine&lt;br /&gt;the partition for a given row. 
Unlike the other two data distribution&lt;br /&gt;methods, hash does not provide any logical mapping between the data&lt;br /&gt;and any partition.&lt;br /&gt;&lt;b&gt;Using the above-mentioned data distribution methods, a table can be partitioned&lt;br /&gt;either as single or composite partitioned table:&lt;/b&gt;&lt;br /&gt;&lt;b&gt;Single (one-level) Partitioning:&lt;/b&gt; A table is defined by specifying one of&lt;br /&gt;the data distribution methodologies, using one or more columns as the&lt;br /&gt;partitioning key. For example consider a table with a number column as&lt;br /&gt;the partitioning key and two partitions 'less_than_five_hundred' and&lt;br /&gt;'less_than_thousand', the 'less_than_thousand' partition contains rows&lt;br /&gt;where the following condition is true: 500 &lt;= Partitioning key &lt; 1000. You can specify Range, List, and Hash partitioned tables.&lt;br /&gt;&lt;b&gt;Composite Partitioning:&lt;/b&gt; A combination of two data distribution&lt;br /&gt;methods is used to define a composite partitioned table. First, the table&lt;br /&gt;is partitioned by data distribution method one and then each partition is&lt;br /&gt;further subdivided into subpartitions using a second data distribution&lt;br /&gt;method. All sub-partitions for a given partition together represent a&lt;br /&gt;logical subset of the data. For example, a range-hash composite&lt;br /&gt;partitioned table is first range-partitioned, and then each individual range-partition&lt;br /&gt;is further sub-partitioned using the hash partitioning technique.&lt;br /&gt;Available composite partitioning techniques are range-hash, range-list,&lt;br /&gt;range-range, list-range, list-list, and list-hash.&lt;br /&gt;Index-organized tables (IOTs) can be partitioned using range, hash, and&lt;br /&gt;list partitioning. 
Composite partitioning is not supported for IOTs.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-7923450282148409312?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/databases-and-partitioning-types.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://2.bp.blogspot.com/-kg8R2RQV2w4/TgUK2n0MYtI/AAAAAAAAHWA/FcXgrSgbyfc/s72-c/oracle_parti.bmp' height='72' width='72'/><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-1782046147959955999</guid><pubDate>Wed, 22 Jun 2011 17:24:00 +0000</pubDate><atom:updated>2011-06-22T12:24:19.395-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>SMP and MPP</category><category domain='http://www.blogger.com/atom/ns#'>Difference between SMP and MPP</category><category domain='http://www.blogger.com/atom/ns#'>symmetric multi processing</category><category domain='http://www.blogger.com/atom/ns#'>massively parallel processing</category><title>Difference between SMP and MPP</title><description>&lt;b&gt;SMP (symmetric multiprocessing&lt;/b&gt;), in which some hardware resources might be shared among processors. The processors communicate via shared memory and have a single operating system.&lt;br /&gt;Or&lt;br /&gt;Symmetric Multi-Processing. In a symmetrical multi-processing environment, the CPU's share the same memory,and as a result code running in one CPU can affect the &lt;br /&gt;memory used by another.&lt;br /&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Cluster or MPP (massively parallel processing)&lt;/b&gt;, also known as shared-nothing, in which each processor has exclusive access to hardware resources. MPP systems are physically housed in the same box, whereas cluster systems can be physically dispersed. 
The processors each have their own operating system, and communicate via a high-speed network.&lt;br /&gt;Or&lt;br /&gt;MPP - Massively Parallel Processing. A computer system with many independent arithmetic units or entire microprocessors, that run in parallel.&lt;br /&gt;&lt;br /&gt;-----------------------------------------------Or&lt;br /&gt;SMP supports limited parallelism, i.e. 64 processors, whereas&lt;br /&gt;MPP can support N number of nodes or processors [high&lt;br /&gt;performance]&lt;br /&gt;SMP processing is SEQUENTIAL whereas &lt;br /&gt;MPP Processing can be PARALLEL&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-1782046147959955999?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/difference-between-smp-and-mpp.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-7916502670831091494</guid><pubDate>Mon, 20 Jun 2011 01:43:00 +0000</pubDate><atom:updated>2011-06-19T20:53:32.060-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>sequential file</category><category domain='http://www.blogger.com/atom/ns#'>difference between dataset and fileset</category><category domain='http://www.blogger.com/atom/ns#'>Fileset vs dataset</category><title>Difference between Dataset, Fileset and sequential file</title><description>&lt;b&gt;Dataset:&lt;/b&gt;&lt;br /&gt;0). Data set is the internal data format behind Orchestrate framework, so any other data being processed as source in parallel job would be converted into data set format first(it is handled by the operator "import") and also being processed as target would be converted from data set format last(it is handled by the operator "export"). 
Hence, data set usually could bring highest performance.&lt;br /&gt;1) It stores data in binary in the internal format of DataStage so, it takes less time to read/write from dataset than any other source/target.&lt;br /&gt;2) It preserves the partitioning schemes so that you don't have to partition it again.&lt;br /&gt;3) You cannot view data without datastage &lt;br /&gt;&lt;br /&gt;&lt;b&gt;Fileset:&lt;/b&gt;&lt;br /&gt;0) Both .ds file and .fs file are the descriptor file of data set and file set respectively, whereas .fs file is stored as ASCII format, so you could directly open it to see the path of data file and its schema. However, .ds file cannot be opened directly, and you could follow an alternative way to achieve that, Data Set Management, the utility in client tool(such as Designer and Manager), and command line ORCHADMIN. &lt;br /&gt;1) It stores data in the format similar to a sequential file.&lt;br /&gt;2) Only advantage of using fileset over a sequential file is "it preserves partitioning scheme"&lt;br /&gt;3) You can view the data but in the order defined in partitioning scheme&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-7916502670831091494?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/dataset-vs-fileset.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-8863153001014897296</guid><pubDate>Mon, 20 Jun 2011 01:36:00 +0000</pubDate><atom:updated>2011-06-19T20:36:10.169-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>lookup stage</category><category domain='http://www.blogger.com/atom/ns#'>dataset</category><category domain='http://www.blogger.com/atom/ns#'>lookup types</category><category domain='http://www.blogger.com/atom/ns#'>fileset</category><title>parallel 
lookup types</title><description>&lt;b&gt;Parallel DataStage jobs can have many sources of reference data for lookups including database tables, sequential files or native datasets. Which is the most efficient?&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;This question has popped up several times over on the DSExchange. In DataStage server jobs the answer is quite simple, local hash files are the fastest method of a key based lookup, as long as the time taken to build the hash file does not wipe out your benefits from using it.&lt;br /&gt;In a parallel job there are a very large number of stages that can be used as a lookup, a much wider variety than server jobs, this includes most data sources and the parallel staging formats of datasets and lookup filesets. I have discounted database lookups as the overhead of the database connectivity and any network passage makes them slower than most local storage.&lt;br /&gt;&lt;br /&gt;someone did a test comparing datasets to sequential files to lookup filesets and increased row volumes to see how they responded. The test had three jobs, each with a sequential file input stage and a reference stage writing to a copy stage.&lt;br /&gt;&lt;br /&gt;Small lookups&lt;br /&gt;I set the input and lookup volumes to 1000 rows. All three jobs processed in 17 or 18 seconds. No lookuptables were created apart from the existing lookup fileset one. This indicates the lookup data fit into memory and did not overflow to a resource file.&lt;br /&gt;&lt;br /&gt;1 Million Row Test&lt;br /&gt;The lookup dataset took 35 seconds, the lookup fileset took 18 seconds and the lookup sequential file took 35 seconds even though it had to partition the data. I assume this is because the input also had to be partitioned and this was the bottleneck in the job.&lt;br /&gt;&lt;br /&gt;2 million rows&lt;br /&gt;Starting to see some big differences now. Lookup fileset down at 45 seconds is only three times the length of the 1000 row test. 
Dataset is up to 1:17 and sequential file up to 1:32. The cost of partitioning the lookup data is really showing now.&lt;br /&gt;&lt;br /&gt;3 million rows&lt;br /&gt;The fileset still at 45 seconds, swallowed up the extra 1 million rows with ease. Dataset up to 2:06 and the sequential file up to 2:20.&lt;br /&gt;&lt;br /&gt;As a final test I replaced the lookup stage with a join stage and tested the dataset and sequential file reference links. The dataset join finished in 1:02 and the sequential file join finished in 1:15. A large join proved faster than a large lookup but not as fast as a lookup file.&lt;br /&gt;&lt;br /&gt;Conclusion&lt;br /&gt;If your lookup size is low enough to fit into memory then the source is irrelevant, they all load up very quickly, even database lookups are fast. If you have very large lookup files spilling into lookup table resources then the lookup fileset outstrips the other options. A join becomes a viable option. They are a bit harder to design as you can only join one source at a time whereas a lookup can join multiple sources.&lt;br /&gt;&lt;br /&gt;I usually go with lookups for code to description or code to key type lookups regardless of the size, I reserve the joins for references that bring back lots of columns. 
I will certainly be making more use of the lookup fileset to get more performance from jobs.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-8863153001014897296?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/parallel-lookup-types.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-6377435901417752264</guid><pubDate>Mon, 20 Jun 2011 01:12:00 +0000</pubDate><atom:updated>2011-06-19T20:15:40.747-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>datastage performance tuning</category><category domain='http://www.blogger.com/atom/ns#'>performance tuning</category><category domain='http://www.blogger.com/atom/ns#'>datastage environment variables</category><title>Optimzie your DataStage Job Performance with relevant Environment Variables</title><description>DataStage has many parameters which can be tweaked and used to optimize the performance of various DataStage Jobs. Even many available to collect more information during the event of crash to get more traces.&lt;br /&gt;For any DataStage Job if you run into problem or want to get more details need to check following variables.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;$APT_CONFIG_FILE:&lt;/b&gt; This allows you to define Configuration file based on your requirement. You can keep many configuration files with n-node combination and assign it dynamically for Job based in criteria or time.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;$APT_SCORE_DUMP:&lt;/b&gt; It creates a job run report that shows the partitioning used, degree of parallelism, data buffering and inserted operators. 
It is Useful for finding out what your high volume job is doing.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;$APT_PM_PLAYER_TIMING:&lt;/b&gt; This option lets you see what each operator in a job is doing, especially how much data they are handling and how much CPU they are consuming. It helps in identifying various bottlenecks.&lt;br /&gt;&lt;br /&gt;One way to speed up very high volume jobs is to pre-sort the data and make sure it is not resorted in the DataStage job.Â  This is done by turning off auto sorting in high volume jobs:&lt;br /&gt;&lt;b&gt;APT_NO_SORT_INSERTION:&lt;/b&gt; stops the job from automatically adding a sort command to the start of a job that has stages that need sorted data such as Remove Duplicates.You can also add a sort stage to the job and set it to a value of "Previously Sorted" to avoid this is a specific job path.&lt;br /&gt;&lt;br /&gt;Please Refer to the following link for more details:&lt;br /&gt;&lt;a href="http://it.toolbox.com/blogs/infosphere/using-datastage-8-parameter-sets-to-tame-environment-variables-25821?sms_ss=blogger&amp;at_xt=4db8f7964e5fd65b%2C0"&gt;Optimzie your DataStage Job Performance with relevant Environment Variables&lt;/a&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-6377435901417752264?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/optimzie-your-datastage-job-performance.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-5947766622243508095</guid><pubDate>Mon, 20 Jun 2011 00:57:00 +0000</pubDate><atom:updated>2011-09-10T17:56:51.550-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>error handling in datastage</category><category domain='http://www.blogger.com/atom/ns#'>error handling</category><category 
domain='http://www.blogger.com/atom/ns#'>datastage errors</category><title>Error Handling in Datastage Job Design</title><description>&lt;div dir="ltr" style="text-align: left;" trbidi="on"&gt;There are multiple ways to handle Errors in Data or in DataStage Jobs.&lt;br /&gt;&lt;br /&gt;We can use the Reject link Option via Transformer Stage and also Reject Link Option from within Connector Stages. If we face issue in Job Sequences, We can use the "Exception Handler" activity. Here is how we can call use this activity from with in DataStage Job Sequence.&lt;br /&gt;&lt;br /&gt;You can check the Check box named "Automatically handle activities that fail" at properties of master sequence. As you might want to have a Check Point, check "Restart job from the failure point".&lt;br /&gt;&lt;br /&gt;In DataStage Job sequence use a exception handler activity. Post exception handler activity can include a email notification activity (same for SMS). On Job Failure the handle will go to the exception handler activity and an email/SMS willl be sent notifying the user that a sequence has failed. It also provides information on failure code as we select part of Job Design.&lt;br /&gt;More on "How to use Notification Activity" in next one. &lt;br /&gt;&lt;br /&gt;Disclaimer: "The postings on this site are my own and don't necessarily represent IBM's positions, strategies or opinions&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Sequence Job Errors:&lt;/b&gt;&lt;br /&gt;&lt;b&gt;1) Controller problem: Loop start &amp;gt; loop end - cannot execute it&lt;/b&gt;&lt;br /&gt;&lt;b&gt;Sol : &lt;/b&gt;Because there is no value being passed from the routine activity? If the re-run of routine activity every time you restart your sequence will not affect anything then check the box "Do not check point" so that the routine activity will fire everytime giving your start loop a value to start with. 
If thats not what you want you will have to re-think your design.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Server Job Errors :&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Parallel Job Errors : &lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;/div&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-5947766622243508095?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/error-handling-in-datastage-job-design.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-2722319945270464735</guid><pubDate>Fri, 17 Jun 2011 22:07:00 +0000</pubDate><atom:updated>2011-06-17T17:07:59.761-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>how to remove duplicates without using remove duplicates stage</category><category domain='http://www.blogger.com/atom/ns#'>sort stage</category><category domain='http://www.blogger.com/atom/ns#'>remove duplicates</category><title>Sort stage to remove duplicate</title><description>&lt;b&gt;1)what is the advantage of using sort stage over remove duplicate stage in removing duplicates.&lt;br /&gt;2) Is there any way in which we can specify which record to retain(like retaining the last record or retaining the first) when we remove duplicate using transform stage, similarly in sort stage also.&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Ans : &lt;/b&gt;&lt;br /&gt;1)The advantage of using sort stage over remove duplicate stage is that sort stage allows us to capture the duplicate records whereas remove duplicate stage does not.&lt;br /&gt;2) Using a sort stage we can only retain the first record.&lt;br /&gt;Normally we go for retaining last when we sort a particular field in ascending order and try to get the last rec. 
The same can be done using sort stage by sorting in descending order to retain the first record.&lt;br /&gt;********&lt;br /&gt;Yes indeed...&lt;br /&gt;Use a sort stage with the option "Create Key Change Column" set to True.&lt;br /&gt;This will create a column Keychange at the output of the sort as below.&lt;br /&gt;&lt;br /&gt;Input&lt;br /&gt;-----&lt;br /&gt;col_a&lt;br /&gt;------&lt;br /&gt;1&lt;br /&gt;1&lt;br /&gt;1&lt;br /&gt;2&lt;br /&gt;2&lt;br /&gt;3&lt;br /&gt;4&lt;br /&gt;5&lt;br /&gt;6&lt;br /&gt;6&lt;br /&gt;&lt;br /&gt;output&lt;br /&gt;-------&lt;br /&gt;col_a Keychange&lt;br /&gt;------------------&lt;br /&gt;1 1&lt;br /&gt;1 0&lt;br /&gt;1 0&lt;br /&gt;2 1&lt;br /&gt;2 0&lt;br /&gt;3 1&lt;br /&gt;4 1&lt;br /&gt;5 1&lt;br /&gt;6 1&lt;br /&gt;6 0&lt;br /&gt;&lt;br /&gt;For the first occurrence of a particular value, it assigns 1 and for subsequent occurrences of the same value it assigns 0.&lt;br /&gt;Now you can use a transformer with constraint Keychange=0 to capture duplicate records.&lt;br /&gt;&lt;br /&gt;*********&lt;br /&gt;Can u tell me what is create cluster key change value in sort stage ... if my requirement is that i have to remove duplicate, i dont want to capture the duplicate records. 
Is that fine to remove duplicate using input tab sort ,unique option of any stage .&lt;br /&gt;******&lt;br /&gt;&lt;br /&gt;the options spcd above for retaining duplicate values using sort stage will obviously work........but as far as I remember there is an option in the properties of the sort stage namely ::ALLOW DUPLICATES.If this property is set to true I think it will satisfy the requirement(provided that u r using datastage 8.x.&lt;br /&gt;&lt;br /&gt;&lt;br /&gt;&lt;br /&gt;If yhe property "cluster key change column" is set to true it will eventually create a cluster key change column in the output.concentrate on the following example::&lt;br /&gt;&lt;br /&gt;i/p_col cluster _key _change_col&lt;br /&gt;1 1&lt;br /&gt;1 0&lt;br /&gt;1 0&lt;br /&gt;2 1&lt;br /&gt;2 0&lt;br /&gt;3 1&lt;br /&gt;1 1&lt;br /&gt;2 1&lt;br /&gt;6 1&lt;br /&gt;6 0&lt;br /&gt;&lt;br /&gt;the logic is that every i/p value will check the value immediete above of it.If it finds a match the o/p is 0 else 1.doesn't matter if it is appeared previously or not.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-2722319945270464735?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/sort-stage-to-remove-duplicate.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-8023550741088755461</guid><pubDate>Wed, 15 Jun 2011 21:52:00 +0000</pubDate><atom:updated>2011-06-15T16:52:49.256-05:00</atom:updated><title>Teradata Database Hardware and Software Architecture</title><description>&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-8023550741088755461?l=t-heartbeat.blogspot.com' alt='' 
/&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/teradata-database-hardware-and-software.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-7073094717761790446</guid><pubDate>Wed, 15 Jun 2011 21:49:00 +0000</pubDate><atom:updated>2011-06-15T16:49:00.248-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>global temporary tables</category><category domain='http://www.blogger.com/atom/ns#'>derived tables</category><category domain='http://www.blogger.com/atom/ns#'>Teradata Permanent and Temporary Tables</category><category domain='http://www.blogger.com/atom/ns#'>volatile tables</category><title>Teradata Permanent and Temporary Tables</title><description>To manipulate tabular data, you must submit a query in a language that the database&lt;br /&gt;understands. In the case of the Teradata Database, the language is SQL. You can store the results of multiple SQL queries in tables. Permanent storage of tables is necessary when different sessions and users must share table contents.&lt;br /&gt;&lt;br /&gt;When tables are required for only a single session, you can request that the system creates temporary tables. Using this type of table, you can save query results for use in subsequent queries within the same session. Also, you can break down complex queries into smaller queries by storing results in a temporary table for use during the same session. When the session ends, the system automatically drops the temporary table.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Global Temporary Tables:&lt;/b&gt;&lt;br /&gt;Global temporary tables are tables that exist only for the duration of the SQL session in which they are used. The contents of these tables are private to the session, and the system automatically drops the table at the end of that session. 
However, the system saves the global temporary table definition permanently in the Data Dictionary. The saved definition may be shared by multiple users and sessions with each session getting its own instance of the table.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Volatile Tables:&lt;/b&gt;&lt;br /&gt;If you need a temporary table for a single use only, you can define a volatile table. The definition of a volatile table resides in memory but does not survive across a system restart. Using volatile tables improves performance even more than using global temporary tables because the system does not store the definitions of volatile tables in the Data Dictionary. Access-rights checking is not necessary because only the creator can access the volatile table.&lt;br /&gt;&lt;b&gt;&lt;br /&gt;Derived Tables:&lt;/b&gt;&lt;br /&gt;A special type of temporary table is the derived table. You can specify a derived table in an SQL SELECT statement. A derived table is obtained from one or more other tables as the result of a subquery. 
The scope of a derived table is only visible to the level of the SELECT statement calling the subquery.&lt;br /&gt;Using derived tables avoids having to use the CREATE and DROP TABLE statements for&lt;br /&gt;storing retrieved information and assists in coding more sophisticated, complex queries.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-7073094717761790446?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/teradata-permanent-and-temporary-tables.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-220105974834667886</guid><pubDate>Wed, 15 Jun 2011 19:25:00 +0000</pubDate><atom:updated>2011-06-15T14:25:12.805-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>second normal form</category><category domain='http://www.blogger.com/atom/ns#'>Normalization</category><category domain='http://www.blogger.com/atom/ns#'>first normal form</category><category domain='http://www.blogger.com/atom/ns#'>3rd normal form</category><category domain='http://www.blogger.com/atom/ns#'>normal forms</category><category domain='http://www.blogger.com/atom/ns#'>referential integrity</category><title>Database Normalization and Referential Integrity</title><description>Normalization is the process of reducing a complex data structure into a simple, stable one.&lt;br /&gt;Generally this process involves removing redundant attributes, keys, and relationships from&lt;br /&gt;the conceptual data model.&lt;br /&gt;&lt;b&gt;First Normal Form&lt;/b&gt;&lt;br /&gt;First normal form (1NF) is definitive of a relational database. 
If we are to consider a database&lt;br /&gt;relational, then all relations in the database must be in 1NF.&lt;br /&gt;We say a relation is in 1NF if all fields within that relation are atomic. We sometimes refer to&lt;br /&gt;this concept as the elimination of repeating groups from a relation. Furthermore, first normal&lt;br /&gt;form allows no hierarchies of data values.&lt;br /&gt;&lt;b&gt;&lt;br /&gt;Second Normal Form&lt;/b&gt;&lt;br /&gt;Second normal form (2NF) deals with the elimination of circular dependencies from a&lt;br /&gt;relation. We say a relation is in 2NF if it is in 1NF and if every non-key attribute is fully&lt;br /&gt;dependent on the entire Primary Key.&lt;br /&gt;A non-key attribute is any attribute that is not part of the Primary Key for the relation.&lt;br /&gt;&lt;b&gt;&lt;br /&gt;Third Normal Form:&lt;/b&gt;&lt;br /&gt;Third normal form (3NF) deals with the elimination of non-key attributes that do not&lt;br /&gt;describe the Primary Key.&lt;br /&gt;For a relation to be in 3NF, the relationship between any two non-Primary Key columns, or&lt;br /&gt;groups of columns, in a relation must not be one-to-one in either direction.&lt;br /&gt;We say attributes are mutually independent if none of them is functionally dependent on any&lt;br /&gt;combination of the others. This mutual independence ensures that we can update individual&lt;br /&gt;attributes without any danger of affecting any other attribute in a row.&lt;br /&gt;The following list of benefits summarizes the advantages of implementing a normalized&lt;br /&gt;logical model in 3NF.&lt;br /&gt;• Greater number of relations&lt;br /&gt;• More PI choices&lt;br /&gt;• Optimal distribution of data&lt;br /&gt;• Fewer full table scans&lt;br /&gt;• More joins possible&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Referential Integrity:&lt;/b&gt;&lt;br /&gt;Traditional referential integrity is the concept of relationships between tables, based on the&lt;br /&gt;definition of a primary key and a foreign key. 
The concept states that a row cannot exist in a&lt;br /&gt;table with a non-null value for a referencing column if an equal value does not exist in a&lt;br /&gt;referenced column.&lt;br /&gt;Using referential integrity, you can specify columns within a referencing table that are foreign&lt;br /&gt;keys for columns in some other referenced table. You must define referenced columns as either&lt;br /&gt;primary key columns or unique columns.&lt;br /&gt;Referential integrity is a reliable mechanism that prevents accidental database inconsistencies&lt;br /&gt;when you perform INSERTS, UPDATES, and DELETES.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-220105974834667886?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/database-normalization-and-referential.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-1796454249211811104</guid><pubDate>Wed, 15 Jun 2011 18:30:00 +0000</pubDate><atom:updated>2011-06-15T13:48:15.849-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>Data distribution</category><category domain='http://www.blogger.com/atom/ns#'>teradata</category><category domain='http://www.blogger.com/atom/ns#'>index</category><category domain='http://www.blogger.com/atom/ns#'>primary index</category><category domain='http://www.blogger.com/atom/ns#'>Data Access Methods</category><title>Teradata Data Distribution and Data Access Methods</title><description>An index is a physical mechanism used to store and access the rows of a table. 
Indexes on&lt;br /&gt;tables in a relational database function much like indexes in books— they speed up&lt;br /&gt;Information  retrieval.&lt;br /&gt;In general, the Teradata Database uses indexes to:&lt;br /&gt;• Distribute data rows.&lt;br /&gt;• Locate data rows.&lt;br /&gt;• Improve performance.&lt;br /&gt;Indexed access is usually more efficient than searching all rows of a table.&lt;br /&gt;• Ensure uniqueness of the index values.&lt;br /&gt;Only one row of a table can have a particular value in the column or columns defined as a&lt;br /&gt;Unique  Index.&lt;br /&gt;The Teradata Database supports the following types of indexes:&lt;br /&gt;• Primary  &lt;br /&gt;• Secondary&lt;br /&gt;• Join&lt;br /&gt;• Hash&lt;br /&gt;• Special indexes for referential integrity&lt;br /&gt;These indexes are discussed in the following sections.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Primary Index :&lt;/b&gt; The Teradata Database requires one Primary Index (PI) for each table in the database, except for some data dictionary tables and global temporary tables.&lt;br /&gt;&lt;br /&gt;Distributes rows.&lt;br /&gt;Defines most common access path.&lt;br /&gt;May be unique or non-unique.&lt;br /&gt;May be null.&lt;br /&gt;Physical access mechanism.&lt;br /&gt;Required by Teradata Database for most tables.&lt;br /&gt;64-column limit.&lt;br /&gt;Values can be changed.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Primary Indexes and Data Distribution:&lt;/b&gt;&lt;br /&gt;Unique Primary Indexes (UPIs) guarantee uniform distribution of table rows.&lt;br /&gt;Non-Unique Primary Indexes (NUPIs) can cause skewed data. While not a guarantor of&lt;br /&gt;uniform row distribution, the degree of uniqueness of the index will determine the degree of&lt;br /&gt;uniformity of the distribution. 
Because all rows with the same PI value end up on the same&lt;br /&gt;AMP, columns with a small number of distinct values that are repeated frequently do not&lt;br /&gt;make good PI candidates.&lt;br /&gt;The most efficient access method to data in a table is through the PI. For this reason, choosing&lt;br /&gt;a PI should take the following design goal into consideration: choosing a PI that gives good&lt;br /&gt;distribution of data across the AMPs must be balanced against choosing a PI that reflects the&lt;br /&gt;most common usage pattern of the table.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-1796454249211811104?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/teradata-data-distribution-and-data.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-1641307157447859511</guid><pubDate>Tue, 14 Jun 2011 02:26:00 +0000</pubDate><atom:updated>2011-06-13T21:26:01.127-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>tera byte</category><category domain='http://www.blogger.com/atom/ns#'>yottabte</category><category domain='http://www.blogger.com/atom/ns#'>hexa byte</category><category domain='http://www.blogger.com/atom/ns#'>byte</category><category domain='http://www.blogger.com/atom/ns#'>large data</category><title>how big is yottabyte.......</title><description>&lt;div class="separator" style="clear: both; text-align: center;"&gt;&lt;a href="http://1.bp.blogspot.com/-tPQLYMWzaDY/TfbGa2DNPDI/AAAAAAAAHVM/u7Ix1tFdB20/s1600/yotabyte.jpg" imageanchor="1" style="margin-left:1em; margin-right:1em"&gt;&lt;img border="0" height="320" width="165" src="http://1.bp.blogspot.com/-tPQLYMWzaDY/TfbGa2DNPDI/AAAAAAAAHVM/u7Ix1tFdB20/s320/yotabyte.jpg" /&gt;&lt;/a&gt;&lt;/div&gt;This year 
it's become clear that data is scaling to such a degree that you have to change how you manage your desktop and your entire information architecture in order to not just manage your daily work but to succeed.&lt;br /&gt;&lt;br /&gt;It's the core issue of our day, one that's a top priority when planning to adopt a virtualized infrastructure that allows for people to access apps from tablets and smartphones.&lt;br /&gt;&lt;br /&gt;The first step is to get a perspective on the size of the data. This infographic shows what a yottabyte represents in comparison to other terms for units of measurement. It may seem far out to think in such terms but considering projected storage requirements, the concept doesn't seem so far fetched.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-1641307157447859511?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/06/how-big-is-yottabyte.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://1.bp.blogspot.com/-tPQLYMWzaDY/TfbGa2DNPDI/AAAAAAAAHVM/u7Ix1tFdB20/s72-c/yotabyte.jpg' height='72' width='72'/><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-494374934791382077</guid><pubDate>Wed, 18 May 2011 19:12:00 +0000</pubDate><atom:updated>2011-05-18T14:13:03.454-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>add blog to google</category><category domain='http://www.blogger.com/atom/ns#'>optimization</category><category domain='http://www.blogger.com/atom/ns#'>search engine</category><title>Search Engine Optimization</title><description>Search Engine Optimization is – a method or process of improving and optimizing the ranking of a website in search engine results.&lt;br /&gt;&lt;br /&gt;After having published a Blog or 
website, the first thing to do is to make sure that the information relating to the site is available on the internet. Web Directories, Blog Directories and Search Engines are the internet Yellow Pages. Although the major Search Engines will crawl your website even without any action on your part, it will be much faster if you could proactively provide them your site information. We had written articles on how and where to submit the blog information so as to have your site fully indexed and appear in many places on the internet.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Submit Blogger Sitemap to Yahoo:&lt;/b&gt;&lt;br /&gt;Publicize your site by having your Blogger.com or Blogspot.com blog site included in Yahoo! Directory and Yahoo! Search. I went to the official &lt;a href="http://search.yahoo.com/info/submit.html"&gt;Yahoo! submission of URL page &lt;/a&gt;and clicked the link “Submit Your Site for Free”. You will be prompted to log in using your Yahoo! account. For those of you who do not have a Yahoo! account, simply sign up for one. It is Free.&lt;br /&gt;&lt;br /&gt;Select “Submit Site Feed” and enter the URL of your Blog's site feed. You can use either:-&lt;br /&gt;&lt;br /&gt;http://YOURBLOGNAME.blogspot.com/rss.xml&lt;br /&gt;&lt;br /&gt;or&lt;br /&gt;&lt;br /&gt;http://YOURBLOGNAME.blogspot.com/atom.xml&lt;br /&gt;&lt;br /&gt;Remember to replace YOURBLOGNAME with that of your Blog's. For Blogger blogs, do not add "www." to YOURBLOGNAME. 
Once the Sitemaps are submitted, click the “Authentication” link shown at the top right hand corner of the page.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-494374934791382077?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/05/search-engine-optimization.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-5711398642288634654</guid><pubDate>Thu, 03 Feb 2011 19:11:00 +0000</pubDate><atom:updated>2011-02-03T13:11:58.969-06:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>2g</category><category domain='http://www.blogger.com/atom/ns#'>2g scam</category><category domain='http://www.blogger.com/atom/ns#'>scam</category><category domain='http://www.blogger.com/atom/ns#'>inida scam</category><category domain='http://www.blogger.com/atom/ns#'>raja scam</category><title>2G Scam Explanation clearly..................</title><description>&lt;a href="http://tinypic.com?ref=4hrty1" target="_blank"&gt;&lt;img src="http://i56.tinypic.com/4hrty1.jpg" border="0" alt="Image and video hosting by TinyPic"&gt;&lt;/a&gt;&lt;br /&gt;&lt;a href="http://tinypic.com?ref=33yhvfm" target="_blank"&gt;&lt;img src="http://i53.tinypic.com/33yhvfm.jpg" border="0" alt="Image and video hosting by TinyPic"&gt;&lt;/a&gt;&lt;br /&gt;&lt;br /&gt;&lt;a href="http://tinypic.com?ref=15q2f0n" target="_blank"&gt;&lt;img src="http://i53.tinypic.com/15q2f0n.jpg" border="0" alt="Image and video hosting by TinyPic"&gt;&lt;/a&gt;&lt;br /&gt;&lt;a href="http://tinypic.com?ref=24pgnds" target="_blank"&gt;&lt;img src="http://i54.tinypic.com/24pgnds.jpg" border="0" alt="Image and video hosting by TinyPic"&gt;&lt;/a&gt;&lt;br /&gt;&lt;a href="http://tinypic.com?ref=fmjtxk" target="_blank"&gt;&lt;img 
src="http://i52.tinypic.com/fmjtxk.gif" border="0" alt="Image and video hosting by TinyPic"&gt;&lt;/a&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-5711398642288634654?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/02/2g-scam-explanation-clearly.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://i56.tinypic.com/4hrty1_th.jpg' height='72' width='72'/><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-1962624489914909244</guid><pubDate>Tue, 01 Feb 2011 18:02:00 +0000</pubDate><atom:updated>2011-05-18T14:11:10.440-05:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>revoke</category><category domain='http://www.blogger.com/atom/ns#'>friend request</category><category domain='http://www.blogger.com/atom/ns#'>facebook</category><title>How to: Revoke Facebook Friend Request</title><description>You were looking at the Facebook profile of that sweet girl in your class and you by mistake sent her a request, whereas you didn’t want to do something like this and wanted to show as if you had no interest in her! Worried, how to cancel the request?? Oh, Poor you!&lt;br /&gt;&lt;br /&gt;There’s another point to it. If you send someone a request, they can access your profile for an indefinite period without confirming it. So it can be a privacy issue too.&lt;br /&gt;&lt;br /&gt;If you are some unlucky chap facing such a situation, then no need to worry. There’s a solution for you to cancel&lt;br /&gt;the friend request you’ve just sent.&lt;br /&gt;&lt;br /&gt;Well actually Facebook does not allow you to revoke a friend request. 
If you go by the rules, the other person&lt;br /&gt;first has to accept your request and then only can you delete him/her from your profile.&lt;br /&gt;&lt;br /&gt;But there’s a way out and all you have to do is play with your privacy settings. Follow these steps:&lt;br /&gt;&lt;br /&gt;1. After you’ve sent the friend request, go to &lt;b&gt;Setting &gt; Privacy Settings.&lt;/b&gt;&lt;br /&gt;2. &lt;b&gt;Add that person to you ‘Block List‘. This will break the friend request.&lt;/b&gt;&lt;br /&gt;3. &lt;b&gt;Then again unblock him/her.&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Now visit that person’s profile. Magic! It shows that good old ‘Add as Friend’.&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-1962624489914909244?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/02/how-to-revoke-facebook-friend-request.html</link><author>noreply@blogger.com (black-hawk)</author><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-2379816890399670747</guid><pubDate>Tue, 25 Jan 2011 20:12:00 +0000</pubDate><atom:updated>2011-11-12T12:47:29.292-06:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>telugodaa majakka</category><category domain='http://www.blogger.com/atom/ns#'>funny telugu jokes</category><title>Telugodaa majakaaa,,,,,,,,,,,,</title><description>&lt;div dir="ltr" style="text-align: left;" trbidi="on"&gt;&lt;a href="http://tinypic.com/?ref=1zb9w93" target="_blank"&gt;&lt;img alt="Image and video hosting by TinyPic" border="0" src="http://i52.tinypic.com/1zb9w93.jpg" /&gt;&lt;/a&gt;&lt;/div&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-2379816890399670747?l=t-heartbeat.blogspot.com' alt='' 
/&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/01/telugodaa-majakaaa.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://i52.tinypic.com/1zb9w93_th.jpg' height='72' width='72'/><thr:total>1</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-4765631918819191644</guid><pubDate>Mon, 24 Jan 2011 20:38:00 +0000</pubDate><atom:updated>2011-01-24T14:51:14.889-06:00</atom:updated><title>Cracking The Credit Card Code</title><description>&lt;div class="separator" style="clear: both; text-align: center;"&gt;&lt;a href="http://2.bp.blogspot.com/_x3kcH6xLAvM/TT3ixtgYbgI/AAAAAAAAGtA/uvcKGITOFDo/s1600/CrackingCreditCode3.jpg" imageanchor="1" style="margin-left:1em; margin-right:1em"&gt;&lt;img border="0" height="820" width="620" src="http://2.bp.blogspot.com/_x3kcH6xLAvM/TT3ixtgYbgI/AAAAAAAAGtA/uvcKGITOFDo/s1600/CrackingCreditCode3.jpg" /&gt;&lt;/a&gt;&lt;/div&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-4765631918819191644?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/01/cracking-credit-card-code.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://2.bp.blogspot.com/_x3kcH6xLAvM/TT3ixtgYbgI/AAAAAAAAGtA/uvcKGITOFDo/s72-c/CrackingCreditCode3.jpg' height='72' width='72'/><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-745803146864264088</guid><pubDate>Mon, 24 Jan 2011 20:30:00 +0000</pubDate><atom:updated>2011-01-25T14:11:39.633-06:00</atom:updated><title>Two Suns? 
Twin Stars Could Be Visible From Earth By 2012</title><description>&lt;div class="separator" style="clear: both; text-align: center;"&gt;&lt;a href="http://3.bp.blogspot.com/_x3kcH6xLAvM/TT3gnz3UZLI/AAAAAAAAGs4/0QvvZ5_8TxQ/s1600/009470-star-wars.gif.jpg" imageanchor="1" style="margin-left:1em; margin-right:1em"&gt;&lt;img border="0" height="180" width="320" src="http://3.bp.blogspot.com/_x3kcH6xLAvM/TT3gnz3UZLI/AAAAAAAAGs4/0QvvZ5_8TxQ/s320/009470-star-wars.gif.jpg" /&gt;&lt;/a&gt;&lt;/div&gt;&lt;br /&gt;* Betelgeuse losing mass&lt;br /&gt;* Explosion will create "new sun"&lt;br /&gt;* May be set for 2012 appearance&lt;br /&gt;&lt;br /&gt;IT'S the ultimate experience for Star Wars fans - staring forlornly off into the distance as twin suns sink into the horizon.&lt;br /&gt;&lt;br /&gt;Yet it's not just a figment of George Lucas's imagination - twin suns are real. And here's the big news - they could be coming to Earth.&lt;br /&gt;&lt;br /&gt;Yes, any day now we see a second sun light up the sky, if only for a matter of weeks.&lt;br /&gt;&lt;br /&gt;The infamous red super-giant star in Orion’s nebula - Betelgeuse - is predicted to go gangbusters and the impending super-nova may reach Earth before 2012, and when it does, all of our wildest Star Wars dreams will come true.&lt;br /&gt;&lt;br /&gt;The second biggest star in the universe is losing mass, a typical indication that a gravitation collapse is occurring.&lt;br /&gt;&lt;br /&gt;When that happens, we'll get our second sun, according to Dr Brad Carter, Senior Lecturer of Physics at the University of Southern Queensland.&lt;br /&gt;&lt;br /&gt;“This old star is running out of fuel in its centre”, Dr Carter said.&lt;br /&gt;&lt;br /&gt;“This fuel keeps Betelgeuse shining and supported. 
When this fuel runs out the star will literally collapse in upon itself and it will do so very quickly.”&lt;br /&gt;&lt;br /&gt;When this happens a giant explosion will occur, tens of millions of times brighter than the sun.&lt;br /&gt;&lt;br /&gt;The bad news is, it could also happen in a million years. But who's counting?&lt;br /&gt;&lt;br /&gt;The important thing is, one day, night will become day for several weeks on Earth.&lt;br /&gt;&lt;br /&gt;“This is the final hurrah for the star,” says Dr Carter.&lt;br /&gt;&lt;br /&gt;“It goes bang, it explodes, it lights up - we’ll have incredible brightness for a brief period of time for a couple of weeks and then over the coming months it begins to fade and then eventually it will be very hard to see at all.”&lt;br /&gt;&lt;br /&gt;The interwebs is being flooded with doomsday theories saying the impending supernova confirms the Mayan calendar’s prediction of the Armageddon in 2012.&lt;br /&gt;&lt;br /&gt;These conspiracies aren’t helped by the word “Betelgeuse” being associated with the devil.&lt;br /&gt;&lt;br /&gt;Though it is a derivation of the Arabic phrase “yad Al Jauza” meaning the “hand of Al-Jauza” referring to a mysterious woman that controls the order of the universe, it hasn’t stopped some people from clearing out their bunkers and stocking up on tinned food.&lt;br /&gt;&lt;br /&gt;Far from being a sign of the apocalypse, according to Dr Carter the supernova will provide Earth with elements necessary for survival and continuity.&lt;br /&gt;&lt;br /&gt;“When a star goes bang, the first we will observe of it is a rain of tiny particles called nuetrinos,” says Dr Carter.&lt;br /&gt;&lt;br /&gt;“They will flood through the Earth and bizarrely enough, even though the supernova we see visually will light up the night sky, 99 per cent of the energy in the supernova is released in these particles that will come through our bodies and through the Earth with absolutely no harm whatsoever.”&lt;br /&gt;&lt;br 
/&gt;Stars such as the supernova produce elements that are critical to life on Earth.&lt;br /&gt;&lt;br /&gt;Quite literally, the whole of Earth and our solar system is made of star stuff, including most of the heavy elements of the Periodic Table.&lt;br /&gt;&lt;br /&gt;“It literally makes things like gold, silver - all the heavy elements - even things like uranium….a star like Betelgeuse is instantly forming for us all sorts of heavy elements and atoms that our own Earth and our own bodies have from long past supernovae,” Dr Carter said.&lt;br /&gt;&lt;br /&gt;Some experts have speculated Betelgeuse’s explosion may cause a neutron star or result in the formation of a black hole approximately 1300 light years from Earth, but Dr Carter says it could go either way.&lt;br /&gt;&lt;br /&gt;“There’s a reasonably even chance of a neutron star or a black hole”, he says.&lt;br /&gt;&lt;br /&gt;“If it were me, I’d suspect it would more likely become a black hole at 20 solar masses.”&lt;br /&gt;&lt;br /&gt;Source:&lt;a href="http://www.news.com.au/technology/sci-tech/tatooines-twin-suns-coming-to-a-planet-near-you-just-as-soon-as-betelgeuse-explodes/story-fn5fsgyc-1225991009247"&gt;news.com.au&lt;/a&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-745803146864264088?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/01/two-suns-twin-stars-could-be-visible.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://3.bp.blogspot.com/_x3kcH6xLAvM/TT3gnz3UZLI/AAAAAAAAGs4/0QvvZ5_8TxQ/s72-c/009470-star-wars.gif.jpg' height='72' width='72'/><thr:total>0</thr:total></item><item><guid isPermaLink='false'>tag:blogger.com,1999:blog-4474430858172239827.post-209708798840314479</guid><pubDate>Thu, 20 Jan 2011 15:58:00 
+0000</pubDate><atom:updated>2011-01-21T18:41:57.862-06:00</atom:updated><category domain='http://www.blogger.com/atom/ns#'>citi bank</category><category domain='http://www.blogger.com/atom/ns#'>aba</category><category domain='http://www.blogger.com/atom/ns#'>routing numbers</category><title>Citibank Routing Numbers</title><description>&lt;div class="separator" style="clear: both; text-align: center;"&gt;&lt;a href="http://2.bp.blogspot.com/_x3kcH6xLAvM/TTonvIHVAvI/AAAAAAAAGss/vPg38QoKj2g/s1600/citi.jpg" imageanchor="1" style="margin-left:1em; margin-right:1em"&gt;&lt;img border="0" height="270" width="320" src="http://2.bp.blogspot.com/_x3kcH6xLAvM/TTonvIHVAvI/AAAAAAAAGss/vPg38QoKj2g/s320/citi.jpg" /&gt;&lt;/a&gt;&lt;/div&gt;&lt;div class="blogger-post-footer"&gt;&lt;img width='1' height='1' src='https://blogger.googleusercontent.com/tracker/4474430858172239827-209708798840314479?l=t-heartbeat.blogspot.com' alt='' /&gt;&lt;/div&gt;</description><link>http://t-heartbeat.blogspot.com/2011/01/citi-bank-routing-numbers.html</link><author>noreply@blogger.com (black-hawk)</author><media:thumbnail xmlns:media='http://search.yahoo.com/mrss/' url='http://2.bp.blogspot.com/_x3kcH6xLAvM/TTonvIHVAvI/AAAAAAAAGss/vPg38QoKj2g/s72-c/citi.jpg' height='72' width='72'/><thr:total>0</thr:total></item></channel></rss>