Wednesday, August 29, 2012

Handout for Hypertable cluster installation and deployment

Based on a Hadoop/HDFS installation

172.16.229.128    172.16.229.129
+------------+     +------------+
|Hyperspace  |     |spare ?!    |
|Master      |     |            |
|Cap Source  |     |            |
|------------|     |------------|    
|Namenode    |     |Secondary   |
|            |     |Namenode &  |
|            |     |JobTracker  |
|------------|     |------------|    
|nn01        |     |jt01        |
+-----+------+     +------+-----+            +----NAT(@VM)
      |                  |                  |
------+------------------+------------------+
      |                  |                  |
+-----+------+     +------+-----+     +------+-----+
|RangeServer |     |RangeServer |     |RangeServer |
|ThriftBroker|     |ThriftBroker|     |ThriftBroker|
|------------|     |------------|     |------------|
|Datanode    |     |Datanode    |     |Datanode    |
|TaskTracker |     |TaskTracker |     |TaskTracker |
|------------|     |------------|     |------------|
|dn01        |     |dn02        |     |dn03        |
+------------+     +------------+     +------------+
172.16.229.130     172.16.229.131     172.16.229.132


1. Password-less SSH (see the sketch below)
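# A minimal sketch for the password-less SSH setup (assuming everything runs as root
# on nn01 and the hostnames from /etc/hosts below; adjust to your environment):
[root@nn01 ~]# ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
[root@nn01 ~]# for h in nn01 jt01 dn01 dn02 dn03; do ssh-copy-id root@$h; done
[root@nn01 ~]# ssh dn01 hostname    # should not ask for a password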
2. /etc/hosts
# Note: the Hypertable roles should refer to the Hadoop hostnames
## Master       ==> nn01
## Hyperspace   ==> nn01
## Cap Source   ==> nn01
## <none>       ==> jt01   # possibly a spare master
## RangeServer  ==> dn01 ~ dn03
## ThriftBroker ==> dn01 ~ dn03
[root@nn01 yum.repos.d]# cat /etc/hosts
172.16.229.128 hd01 nn01 namenode nn01.hd
172.16.229.129 hd02 jt01 jobtracker  nn02 #secondary namenode
172.16.229.130 hd03 dn01 datanode01
172.16.229.131 hd04 dn02 datanode02
172.16.229.132 hd05 dn03 datanode03
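# A quick sanity check (a sketch, assuming the /etc/hosts above has been copied to
# every node): make sure each name resolves to the expected address on every host.
[root@nn01 ~]# for h in nn01 jt01 dn01 dn02 dn03; do getent hosts $h; done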

3. Allow simultaneous connections from all of the machines in the Hypertable cluster (@admin or @source)
[root@nn01 ~]# vi /etc/ssh/sshd_config
  :
#MaxStartups 10
MaxStartups 100
  :
[root@nn01 ~]# service sshd restart

4. Turn off the firewall
   or
   maintain iptables rules for the Hypertable cluster (see the sketch below)
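# A minimal sketch for either option on CentOS 6 (assuming the whole cluster sits on
# the 172.16.229.0/24 subnet; run on every host):
## option A: turn the firewall off entirely
[root@nn01 ~]# service iptables stop
[root@nn01 ~]# chkconfig iptables off
## option B: keep iptables, but accept all traffic from the cluster subnet
[root@nn01 ~]# iptables -I INPUT -s 172.16.229.0/24 -j ACCEPT
[root@nn01 ~]# service iptables save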

5. Configure open-file limits and swappiness (@ all hosts)
vi /etc/security/limits.conf
  :
* - nofile 65536
  :
# or
  :
hdfs - nofile 65536        #Hadoop processes owned by hdfs(@ hadoop cluster ?!)
hypertable - nofile 65536    #Hypertable processes owned by hypertable

# then log in again, or reboot

[root@nn01 ~]# ulimit -a
  :
open files                      (-n) 65536
  :

[root@nn01 ~]# vi /etc/sysctl.conf
  :
vm.swappiness=0

[root@nn01 ~]# sysctl -p
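# To confirm the swappiness change took effect (same check on every host):
[root@nn01 ~]# sysctl vm.swappiness
vm.swappiness = 0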

6. Gem, Monitor, and Capistrano
##install gem @nn01
[root@nn01 ~]# yum install git-core ruby ruby-devel ruby-ext ruby-rdoc
[root@nn01 ~]# wget http://rubyforge.org/frs/download.php/75475/rubygems-1.8.11.tgz
[root@nn01 ~]# tar -zxvf rubygems-1.8.11.tgz
[root@nn01 ~]# cd rubygems-1.8.11;ruby setup.rb
## install capistrano @nn01
[root@nn01 rubygems-1.8.11]# gem install capistrano

##install tools @nn01 (for monitoring server setup)
[root@nn01 rubygems-1.8.11]# yum install gcc-c++ -y
[root@nn01 rubygems-1.8.11]# gem install sinatra rack thin json titleize
[root@nn01 rubygems-1.8.11]# yum install rrdtool -y
[root@nn01 ~]# /sbin/ldconfig
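# A quick check that Capistrano and the monitoring gems are visible (a sketch; the
# exact versions listed will differ):
[root@nn01 ~]# gem list | grep -E 'capistrano|sinatra|rack|thin|json|titleize'
[root@nn01 ~]# which cap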

[root@nn01 rubygems-1.8.11]# cd ~

# copy the sample Capfile from another host
[root@nn01 ~]# scp HT243:~/Cap* .
Capfile.cluster                               100%   12KB  11.9KB/s   00:00   

[root@nn01 ~]# vi Capfile.cluster
set :source_machine,     "nn01"
set :install_dir,        "/opt/hypertable"
set :hypertable_version, "0.9.6.1"
set :default_pkg,        "/extras/hypertable-0.9.6.1-linux-x86_64.rpm"
set :default_dfs,        "hadoop"
set :default_config,     "/root/hypertable.cfg"

role :source, "nn01"
role :master, "nn01"
role :hyperspace, "nn01"
role :slave,  "dn01", "dn02", "dn03"
role :localhost, "nn01"
role :thriftbroker
role :spare, "jt01"

## note: don't use different hostnames (or aliases) for the same machine, otherwise you will get errors

[root@nn01 ~]# mv Capfile.cluster Capfile
[root@nn01 ~]# mkdir /extras
[root@nn01 ~]# cd /extras
[root@nn01 extras]# wget ftp://HT248/extras/hypertable*
[root@nn01 extras]# ls -l /extras/hypertable-0.9.6.1-linux-x86_64.rpm
-rw-r--r-- 1 root root 94864139 Aug 24 00:00 /extras/hypertable-0.9.6.1-linux-x86_64.rpm
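# Before distributing the package it doesn't hurt to inspect the RPM metadata
# (a sketch; the fields shown depend on the build you downloaded):
[root@nn01 extras]# rpm -qpi /extras/hypertable-0.9.6.1-linux-x86_64.rpm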


7. Installation
# dependency packages that must be installed on every server in the cluster
## for /opt/hypertable/current/bin/jrun
[root@nn01 ~]# yum install java-1.6.0-openjdk java-1.6.0-openjdk-devel -y
## for hypertable rpm dependency
[root@nn01 ~]# yum install perl-Bit-Vector perl-IO-String perl-IO-Zlib perl-IO-Socket-INET6 perl-IO-Socket-SSL perl-libwww-perl perl-HTTP-Request
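# A quick check that the dependencies are visible on each host (a sketch):
[root@nn01 ~]# java -version
[root@nn01 ~]# rpm -q perl-Bit-Vector perl-IO-String perl-IO-Zlib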

[root@nn01 extras]# cd ~
[root@nn01 ~]# cap install_package
[root@nn01 ~]# cap dist
# FHS-ize: http://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
[root@nn01 ~]# cap shell
cap> mkdir /etc/opt/hypertable /var/opt/hypertable
cap> chown root:root /etc/opt/hypertable /var/opt/hypertable
cap> exit
exiting
[root@nn01 ~]# cap fhsize
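# To confirm the package landed on every node, the same "cap invoke" trick shown
# later in this handout works here too (a sketch):
[root@nn01 ~]# cap invoke COMMAND="ls /opt/hypertable"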


8. Startup
## mkdir for Hypertable (using the hadoop command)
[root@nn01 ~]# sudo -u hdfs hadoop fs -mkdir /hypertable
[root@nn01 ~]# sudo -u hdfs hadoop fs -ls /
Found 6 items
drwxr-xr-x   - hdfs supergroup          0 2012-08-24 16:38 /hypertable
drwxr-xr-x   - root supergroup          0 2012-08-23 02:56 /hypertable-solo
drwxr-xr-x   - hdfs supergroup          0 2012-08-24 12:04 /root
drwxrwxrwt   - hdfs supergroup          0 2012-08-24 09:11 /tmp
drwxr-xr-x   - hdfs supergroup          0 2012-08-23 02:00 /user
drwxr-xr-x   - hdfs supergroup          0 2012-08-24 12:08 /usr
[root@nn01 ~]# sudo -u hdfs hadoop fs -chown -R root /hypertable
[root@nn01 ~]# sudo -u hdfs hadoop fs -ls /
Found 6 items
drwxr-xr-x   - root supergroup          0 2012-08-24 16:38 /hypertable
drwxr-xr-x   - root supergroup          0 2012-08-23 02:56 /hypertable-solo
drwxr-xr-x   - hdfs supergroup          0 2012-08-24 12:04 /root
drwxrwxrwt   - hdfs supergroup          0 2012-08-24 09:11 /tmp
drwxr-xr-x   - hdfs supergroup          0 2012-08-23 02:00 /user
drwxr-xr-x   - hdfs supergroup          0 2012-08-24 12:08 /usr

[root@nn01 ~]# scp HT248:~/hypertable.cfg ~/.
hypertable.cfg                                100%  633     0.6KB/s   00:00   

[root@nn01 ~]# vi ~/hypertable.cfg
  :
HdfsBroker.fs.default.name=hdfs://nn01:9000/
  :
# DFS Broker - for clients
DfsBroker.Port=38030
# don't set this to just nn01, or the DfsBroker will not be started on the slaves
DfsBroker.Host=localhost 
  :
# Hyperspace
Hyperspace.Replica.Host=nn01
#or --> Hyperspace.Replica.Host=localhost
  :

[root@nn01 ~]# cap push_config
[root@nn01 ~]# cap set_current
[root@nn01 ~]# cap shell
  * executing `shell'
====================================================================
Welcome to the interactive Capistrano shell! This is an experimental
feature, and is liable to change in future releases. Type 'help' for
a summary of how to use the shell.
--------------------------------------------------------------------
cap> date
[establishing connection(s) to dn01, dn02, dn03, nn01, jt01]
 ** [out :: dn01] Fri Aug 24 12:19:21 CST 2012
 ** [out :: dn02] Fri Aug 24 12:19:21 CST 2012
 ** [out :: dn03] Fri Aug 24 12:19:21 CST 2012
 ** [out :: nn01] Fri Aug 24 12:19:21 CST 2012
 ** [out :: jt01] Fri Aug 24 12:19:21 CST 2012
cap> quit
exiting
## you could run this as a one-liner instead, e.g.
## cap invoke COMMAND="date"

[root@nn01 ~]# cap start

## you can shut the cluster down with
[root@nn01 ~]# cap stop
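# A quick smoke test once the cluster is up (a sketch; the Test table, row and value
# below are just examples):
[root@nn01 ~]# /opt/hypertable/current/bin/ht shell
hypertable> use "/";
hypertable> create table Test (col);
hypertable> insert into Test values ('row1', 'col', 'hello');
hypertable> select * from Test;
hypertable> quit;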

[root@nn01 log]# elinks -dump http://nn01:38090/
   [1]Hypertable

                             Hypertable Monitoring

   [2]Range Servers  | [3]Tables

  Range Servers

    Number of Range Servers: 3

    Master Version: Python 2.6.8

Server[4]  Host  IP              System            Kernel                Arch    Cores
[5]rs1     dn03  172.16.229.132  CentOS Linux 6.0  2.6.32-71.el6.x86_64  x86_64  1
[6]rs2     dn02  172.16.229.131  CentOS Linux 6.0  2.6.32-71.el6.x86_64  x86_64  1
[7]rs3     dn01  172.16.229.130  CentOS Linux 6.0  2.6.32-71.el6.x86_64  x86_64  1

Server     Disk (TB)  Disk Use (%)  RAM (GB)  Clock Skew  Range Count  Last Error  Last Contact
[5]rs1     0.20       1             1.02      0           4            ok          Wed Aug 29 15:14:14 2012
[6]rs2     0.20       1             1.02      0           2            ok          Wed Aug 29 15:14:14 2012
[7]rs3     0.20       1             1.02      0           2            ok          Wed Aug 29 15:14:14 2012

   Hypertable

References

   Visible links
   2. http://nn01:38090/
   3. http://nn01:38090/tables
   4. http://nn01:38090/?sort=name&ord=dsc
   5. http://nn01:38090/graphs?server=rs1
   6. http://nn01:38090/graphs?server=rs2
   7. http://nn01:38090/graphs?server=rs3


[FAQ]
(1) cap XXX
   You must execute "cap XXX" from the directory that contains the Capfile. In this
   deployment the Capfile lives in /root, so change your working directory to /root
   before running any "cap XXX" command. It also means that only the 'root' user can
   launch Hypertable here.
(2) Hypertable must be able to reach every node of the Hadoop cluster
   Hypertable's DfsBroker (Hadoop broker) acts as a DFS client. When it reads or
   writes on HDFS, it
   1) gets the file position (metadata) from the Namenode, which tells the DFS
      client where the file lives, then
   2) goes to the Datanodes and reads or writes the file at that position.
   If any of those nodes is unreachable, the Hypertable Master will give you errors
   when it starts up.
(3) Master could not start up
   If the Master fails to start and you find a message like this in DfsBroker.hadoop.log:

Aug 29, 2012 2:29:10 PM org.hypertable.DfsBroker.hadoop.HdfsBroker Create
SEVERE: I/O exception while creating file '/hypertable/servers/master/log/mml/1' - org.apache.hadoop.ipc.RemoteException: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot create file/hypertable/servers/master/log/mml/1. Name node is in safe mode.
   
   try 'cap stop' followed by 'cap start',
      or
   'cap stop' ; remove /hypertable/servers/master/log/mml/## ; 'cap start'

   and the problem should no longer exist.
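# Since the error above says the Namenode is in safe mode, it can also help to check
# (and, if the cluster is healthy, leave) safe mode before restarting; a sketch:
[root@nn01 ~]# sudo -u hdfs hadoop dfsadmin -safemode get
[root@nn01 ~]# sudo -u hdfs hadoop dfsadmin -safemode leave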

References:
http://code.google.com/p/hypertable/wiki/HypertableManual
https://groups.google.com/forum/?fromgroups#!topic/hypertable-user/a7bMbucORMg
http://hypertable.com/documentation/administrator_guide/monitoring_system/
https://groups.google.com/forum/?fromgroups#!topic/hypertable-user/ZcioX6bUKd4
http://osdir.com/ml/hypertable-user/2010-06/msg00021.html
