Tuesday, November 6, 2012

Torque with MPICH2, Hadoop and Hypertable


Torque (OpenPBS) Resource Manager
based on: OpenMP and MPICH2 hybrid example


[root@cent146 ~]# cat /etc/hosts
172.16.43.245   DFS3 HT245      # Added by NetworkManager
127.0.0.1       localhost.localdomain   localhost
::1     DFS3    localhost6.localdomain6 localhost6
172.16.173.145  cent145
172.16.173.146  cent146


  172.16.173.146      172.16.173.145             172.16.43.141
+---------------+     +-------------+             +------------+
|Head node      |     |Compute node |             |Hypertable  |
|Compute node   |     |             |             |            |
|---------------|     |-------------|             |------------|
|HT thrift api  |     |HT thrift api|             |            |
|---------------|     |-------------|             |------------|
|pbs_server     |     |pbs_mom      |             |            |
|pbs_sched(maui)|     |             |             |------------|
|pbs_mom        |     |             |             |nn01        |
|---------------|     |-------------|             +------+-----+
|cent146        |     |cent145      |                    |
+-------+-------+     +------+------+     +----NAT(@VM)--+-
        |                    |            |
--------+--------------------+------------+

##qsub: submits the job to pbs_server
##      pbs_server informs pbs_sched (the default FIFO scheduler, pbs_sched, or an advanced one such as MAUI/MOAB)
##      pbs_sched makes local policy decisions about resource usage and allocates nodes to jobs
##      pbs_sched sends instructions to run the job, with the node list, back to pbs_server
##      pbs_server must recognize which systems on the network are its compute nodes ($TORQUE_HOME/server_priv/nodes)
##      pbs_server sends the new job to the 1st node in the node list (the "Mother Superior") to run it
##                                          other nodes in a job are called sister moms
## in the self-built rpm, $TORQUE_HOME is /var/spool/torque
## in the epel rpm, $TORQUE_HOME is /etc/torque
##qmgr: modifies the server configuration to set TORQUE up to do useful work,
##      such as enabling scheduling
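
## a minimal sketch of the flow above, using only pieces that already appear in
## this post (the job itself is illustrative):
echo "hostname" | qsub                      # client submits to pbs_server
qstat                                       # job waits in Q until pbs_sched/maui places it
cat /var/spool/torque/server_priv/nodes     # the compute-node list pbs_server consults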

[self-built rpm (see APPENDIX-A), without munge and with ssh/scp, torque-4.1.2]
# @cent145 & cent146, install as the root user
rpm -Uvh http://ftp.sjtu.edu.cn/sites/download.fedora.redhat.com/pub/epel/6/x86_64/epel-release-6-7.noarch.rpm
[root@cent146 ~]# yum install torque torque-devel torque-scheduler torque-server torque-mom
[root@cent145 ~]# yum install torque torque-devel torque-scheduler torque-server torque-mom



## set up passwordless ssh (for the root and hypertable users)
## setup references below

cat /usr/share/doc/torque-4.1.2/README.torque

browse to http://adaptivecomputing.com/documentation/
or
browse to http://mauichina.blogspot.tw/2009/06/torque_19.html

[root@cent146 server_priv]# rpm -ql torque-server
    :
    :
/var/spool/torque/server_priv/nodes
/var/spool/torque/server_priv/queues


##Setting up




STEP-1) create the serverdb file (in $TORQUE_HOME/server_priv)

## http://www.adaptivecomputing.com/resources/docs/torque/4-1-2/Content/topics/1-installConfig/initializeConfigOnServer.htm#pbs_server%20-t%20create
[root@cent146 ~]# pbs_server -t create  ## this command must be executed by root
##or
##[root@cent146 ~]# cp /usr/share/doc/torque-server-4.1.2/torque.setup .
##[root@cent146 ~]# ./torque.setup 'username'


[root@cent146 ~]# qmgr -c 'p s' 
#
# Set server attributes.
#
set server acl_hosts = cent146
set server log_events = 511
set server mail_from = adm
set server scheduler_iteration = 600
set server node_check_rate = 150
set server tcp_timeout = 300
set server job_stat_rate = 45
set server poll_jobs = True
set server mom_job_sync = True
set server moab_array_compatible = True

[root@cent146 ~]# ps -ef | grep pbs_server
root      2426     1  0 19:22 ?        00:00:00 pbs_server -t create
root      2435  1361  0 19:24 pts/0    00:00:00 grep pbs_server
[root@cent146 ~]# qterm


STEP-2) specify all nodes (@cent146)

## http://www.adaptivecomputing.com/resources/docs/torque/4-1-2/Content/topics/1-installConfig/specifyComputeNodes.htm
[root@cent146 server_priv]# vi /var/spool/torque/server_priv/nodes
cent146 master comnode
cent145 slave comnode
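
## (sketch) the nodes file also accepts an np=<slots> attribute per host; e.g. to
## advertise 2 job slots on cent146 the line would read:
##   cent146 np=2 master comnode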

[root@cent146 ~]# ps -ef | grep pbs_server
root      2439  1361  0 19:24 pts/0    00:00:00 grep pbs_server
[root@cent146 ~]# pbs_server  ## restart pbs_server for the following setup procedure


STEP-3) specify each compute node's config (on cent145 & cent146, root user only)

## allow pbs_server (its hostname resolvable via /etc/hosts) to launch jobs on this pbs_mom
[root@cent146 ~]# vi /var/spool/torque/mom_priv/config
$pbsserver cent146
[root@cent145 ~]# vi /var/spool/torque/mom_priv/config
$pbsserver cent146
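
## (sketch) mom_priv/config accepts more directives than $pbsserver; two common
## optional ones (shown as examples, not required here):
##   $logevent 255                  # log-event bitmask
##   $usecp *:/home /home           # stage output with cp over a shared /home instead of scp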




STEP-4) create a queue and enable the server to accept and run jobs

[root@cent146 ~]# find / -name torque.setup
/usr/share/doc/torque-server-4.1.2/torque.setup
[root@cent146 ~]# cp /usr/share/doc/torque-server-4.1.2/torque.setup ~/.
[root@cent146 ~]# chmod 755 torque.setup 
[root@cent146 ~]# ./torque.setup root
[root@cent146 ~]# qmgr -c "set server scheduling=true"
[root@cent146 ~]# qmgr -c "create queue batch queue_type=execution"
[root@cent146 ~]# qmgr -c "set queue batch started=true"
[root@cent146 ~]# qmgr -c "set queue batch enabled=true"
[root@cent146 ~]# qmgr -c "set queue batch resources_default.nodes=1"
[root@cent146 ~]# qmgr -c "set queue batch resources_default.walltime=3600"
[root@cent146 ~]# qmgr -c "set server default_queue=batch" 

[root@cent146 ~]# qmgr -c 'p s' 
#
# Create queues and set their attributes.
#
#
# Create and define queue batch
#
create queue batch
set queue batch queue_type = Execution
set queue batch resources_default.nodes = 1
set queue batch resources_default.walltime = 01:00:00
set queue batch enabled = True
set queue batch started = True
#
# Set server attributes.
#
set server scheduling = True
set server acl_hosts = cent146
set server default_queue = batch
set server log_events = 511
set server mail_from = adm
set server scheduler_iteration = 600
set server node_check_rate = 150
set server tcp_timeout = 300
set server job_stat_rate = 45
set server poll_jobs = True
set server mom_job_sync = True
set server next_job_number = 0
set server moab_array_compatible = True

##verify all queues are properly configured 
[root@cent146 server_priv]# qstat -q

server: cent146

Queue            Memory CPU Time Walltime Node  Run Que Lm  State
---------------- ------ -------- -------- ----  --- --- --  -----
batch              --      --       --      --    0   0 --   E R
                                               ----- -----
                                                   0     0
## verify all nodes are correctly reporting 
[root@cent146 server_priv]# pbsnodes -a
cent146
     state = free
     np = 1
     properties = master,comnode
     ntype = cluster
     status = rectime=1350043175,varattr=,jobs=,state=free,netload=14748874,gres=,loadave=0.00,ncpus=1,physmem=1017464kb,availmem=2876912kb,totmem=3081840kb,idletime=63603,nusers=0,nsessions=0,uname=Linux cent146 2.6.32-71.el6.x86_64 #1 SMP Fri May 20 03:51:51 BST 2011 x86_64,opsys=linux
     mom_service_port = 15002
     mom_manager_port = 15003
     gpus = 0

cent145
     state = free
     np = 1
     properties = slave,comnode
     ntype = cluster
     mom_service_port = 15002
     mom_manager_port = 15003
     gpus = 0

## submit a basic job - DO NOT RUN AS ROOT 
[root@cent146 server_priv]# echo "sleep 30" | qsub
qsub can not be run as root
[root@cent146 server_priv]# useradd -u 500 hypertable
[root@cent146 server_priv]# su - hypertable
[root@cent146 server_priv]# ./torque.setup hypertable  ## note: passwordless ssh must already be set up
[root@cent146 server_priv]# echo set server operators += hypertable@cent146 | qmgr
Max open servers: 9
[root@cent146 server_priv]# echo set server managers += hypertable@cent146 | qmgr
Max open servers: 9
[hypertable@cent146 ~]$ cat /tmp/dolog.sh
#!/bin/bash
echo "$(date +%Y%m%d-%H%M%S)" >> /tmp/tmp.log
[hypertable@cent146 ~]$ chmod 755 /tmp/dolog.sh
[hypertable@cent146 ~]$ scp /tmp/dolog.sh cent146:/tmp/.
[hypertable@cent146 ~]$ qsub -l nodes=1:slave+1:comnode /tmp/dolog.sh
#[hypertable@cent146 ~]$ echo "sleep 30" | qsub
0.cent146

[hypertable@cent146 ~]$ qstat -a

cent146: 
                                                                         Req'd  Req'd   Elap
Job ID               Username    Queue    Jobname          SessID NDS   TSK    Memory Time  S Time
-------------------- ----------- -------- ---------------- ------ ----- ------ ------ ----- - -----
0.cent146            hypertable  batch    dolog.sh           1479     2 1:slav    --  01:00 Q 00:00

##At this point, the job should be in the Q state and will not run because a scheduler is not running yet. 



STEP-5) auto-launch services at boot time

[root@cent146 mom_priv]# chkconfig --list | grep pbs
pbs_mom         0:??  1:??  2:??  3:??  4:??  5:??  6:??
pbs_sched       0:??  1:??  2:??  3:??  4:??  5:??  6:??
pbs_server      0:??  1:??  2:??  3:??  4:??  5:??  6:??
[root@cent146 mom_priv]# chkconfig --list | grep trqauthd
trqauthd        0:??  1:??  2:??  3:??  4:??  5:??  6:??

[root@cent145 mom_logs]#  chkconfig --list | grep pbs
pbs_mom         0:??  1:??  2:??  3:??  4:??  5:??  6:??
pbs_sched       0:??  1:??  2:??  3:??  4:??  5:??  6:??
pbs_server      0:??  1:??  2:??  3:??  4:??  5:??  6:??
[root@cent145 mom_logs]# chkconfig --list | grep trqauthd
trqauthd        0:??  1:??  2:??  3:??  4:??  5:??  6:??
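
## a sketch of actually enabling auto-start (turn on only what each host runs;
## leave pbs_sched off if you will switch to maui in STEP-6):
chkconfig trqauthd on
chkconfig pbs_mom on
chkconfig pbs_server on      # head node cent146 only
chkconfig pbs_sched on       # or keep it off and use maui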


[openpbs with mpiexec]

[hypertable@cent146 ~]$ cat /tmp/dolog.sh
#!/bin/bash
echo "$(date +%Y%m%d-%H%M%S)" >> /tmp/tmp.log
[hypertable@cent146 ~]$ scp /tmp/dolog.sh cent145:/tmp/.

[hypertable@cent146 ~]$ cat host.hydra 
cent145
cent146

[hypertable@cent146 ~]$ vi myprog
###PBS -l nodes=1:master+1:slave
#PBS -l nodes=1:master
#PBS -N sean_job
#PBS -j oe
##/tmp/dolog.sh
/usr/lib64/mpich2/bin/mpiexec -f host.hydra -n 4 /tmp/dolog.sh

[hypertable@cent146 ~]$ qsub -V myprog 
33.cent146
[hypertable@cent146 ~]$ qstat
Job id                    Name             User            Time Use S Queue
------------------------- ---------------- --------------- -------- - -----
33.cent146                 sean_job         hypertable      00:00:00 C batch    
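
## an alternative sketch for myprog: instead of the static host.hydra, use the
## $PBS_NODEFILE that pbs_mom generates with the nodes allocated to this job:
##   /usr/lib64/mpich2/bin/mpiexec -f $PBS_NODEFILE -n 4 /tmp/dolog.sh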



STEP-6) Optional: install maui instead of pbs_sched

## rpm build by fpm , refer to APPENDIX-B
[hypertable@cent146 ~]$ su -
[root@cent146 ~]# yum install maui
[root@cent146 ~]# source /etc/profile.d/maui.sh
## $PATH is already configured in /etc/profile.d/maui.sh (bash)
[root@cent146 ~]# vi /usr/local/maui/maui.cfg
# maui.cfg 3.3.1

SERVERHOST            cent146
# primary admin must be first in list
ADMIN1                root hypertable  # add hypertable, otherwise there will be authorization errors

# Resource Manager Definition

#RMCFG[CENT145] TYPE=PBS
#RMCFG[CENT146] TYPE=PBS@RMNMHOST@fRMTYPE[0] PBS
RMCFG[CENT146] TYPE=PBS

# before starting maui, pbs_sched should be stopped
[root@cent146 server_priv]# service pbs_sched stop
Shutting down TORQUE Scheduler:                            [  OK  ]
[root@cent146 server_priv]# chkconfig pbs_sched off
[root@cent146 server_priv]# chkconfig --list | grep maui
maui.d          0:??  1:??  2:??  3:??  4:??  5:??  6:??
[root@cent146 server_priv]# service maui.d start
Starting MAUI Scheduler:                                   [  OK  ]
[root@cent146 server_priv]# showq
ACTIVE JOBS--------------------
JOBNAME            USERNAME      STATE  PROC   REMAINING            STARTTIME


     0 Active Jobs       0 of    0 Processors Active (0.00%)

IDLE JOBS----------------------
JOBNAME            USERNAME      STATE  PROC     WCLIMIT            QUEUETIME


0 Idle Jobs

BLOCKED JOBS----------------
JOBNAME            USERNAME      STATE  PROC     WCLIMIT            QUEUETIME


Total Jobs: 0   Active Jobs: 0   Idle Jobs: 0   Blocked Jobs: 0
[root@cent146 server_priv]# su - hypertable
[hypertable@cent146 ~]$ qsub myprog
[hypertable@cent146 ~]$ qstat -a

cent146: 
                                                                         Req'd  Req'd   Elap
Job ID               Username    Queue    Jobname          SessID NDS   TSK    Memory Time  S Time
-------------------- ----------- -------- ---------------- ------ ----- ------ ------ ----- - -----
81.cent146           hypertable  batch    sean_job          30286     1 master    --  01:00 C 00:00
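
## a few maui-side sanity checks (all of these commands ship in the maui
## package; the job id below is illustrative):
showq              # the scheduler's view of active/idle/blocked jobs
checkjob 81        # detailed state of a single job
diagnose -n        # per-node allocation diagnostics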




STEP-7) Optional: install the hypertable thrift interface

##download rpm
[root@cent146 ~]# scp 172.16.43.248:/var/ftp/extras/hypertable-th* .
[root@cent146 ~]# scp hypertable-thriftbroker-only-0.9.6.1-linux-x86_64.rpm cent145:~/.
[root@cent146 ~]# yum localinstall hypertable-thriftbroker-only-0.9.6.1-linux-x86_64.rpm --nogpgcheck
[root@cent145 ~]# yum localinstall hypertable-thriftbroker-only-0.9.6.1-linux-x86_64.rpm --nogpgcheck
[root@cent146 ~]# ln -s /opt/hypertable/0.9.6.1/ /opt/hypertable/current
[root@cent145 ~]# ln -s /opt/hypertable/0.9.6.1/ /opt/hypertable/current


## Make candle.cc at Hypertable Server

### prepare a Hypertable development environment on the MPICH2 server (cent146)
#[root@cent146 include]# yum install gcc gcc-c++ make boost-devel
#[root@cent146 include]# scp -r 172.16.43.141:/opt/hypertable/current/include/* .
#[root@cent146 src]# cd /opt/hypertable/current/lib
#[root@cent146 lib]# scp 172.16.43.141:/opt/hypertable/current/lib/libHyperCommon.a .
#[root@cent146 lib]# scp 172.16.43.141:/opt/hypertable/current/lib/libHypertable.a .
#[root@cent146 lib]# ln -s /opt/hypertable/current/lib/libthrift-0.8.0.so /opt/hypertable/current/lib/libthrift.so
#[root@cent146 lib]# ln -s /opt/hypertable/current/lib/libevent-1.4.so.2 /opt/hypertable/current/lib/libevent.so
#[root@cent146 lib]# ln -s /opt/hypertable/current/lib/liblog4cpp.so.4 /opt/hypertable/current/lib/liblog4cpp.so

[hypertable@nn01 src]$ cat candle.cc
#include <iostream>
#include <fstream>

#include <netinet/in.h>
#include "ThriftBroker/Client.h"
#include "ThriftBroker/gen-cpp/HqlService.h"
#include "ThriftBroker/ThriftHelper.h"
#include "ThriftBroker/SerializedCellsReader.h"

using namespace Hypertable;
using namespace Hypertable::ThriftGen;

void run(Thrift::Client *client);
void test_hql(Thrift::Client *client, std::ostream &out);


int main(int argc , char * argv[]) {
  // connect to the ThriftBroker on argv[1] if given, otherwise localhost (port 38080)
  //Thrift::Client *client = new Thrift::Client("localhost", 38080);
  Thrift::Client *client = NULL;
  if(argc > 1)
  {
        client = new Thrift::Client(argv[1], 38080);
        std::cout << argv[1] << std::endl;
  }
  else
  {
        client = new Thrift::Client("localhost", 38080);
        std::cout << "localhost" << std::endl;
  }
  if(client)
        run(client);
}


void run(Thrift::Client *client) {
  try {
    std::ostream &out = std::cout;
    out << "running test_hql" << std::endl;
    test_hql(client, out);
  }
  catch (ClientException &e) {
    std::cout << e << std::endl;
    exit(1);
  }
}



void test_hql(Thrift::Client *client, std::ostream &out) {
  HqlResult result;
  if (!client->namespace_exists("quote"))
  {
    out << "no quote namespace exist" << std::endl;
    return;
  }
  Namespace ns = client->namespace_open("quote");
  if(client->table_exists(ns,"candle_daily"))
  {
    HqlResultAsArrays result_as_arrays;
    client->hql_query_as_arrays(result_as_arrays, ns, "select * from candle_daily");


    out << result_as_arrays.cells[0] << std::endl << "****" << std::endl;
    // dump every cell of the first two rows, separated by "****"
    for(unsigned int i = 0 ; i < 2 ; i++)
      for(unsigned int j = 0 ; j < result_as_arrays.cells[i].size() ; j++)
        out << result_as_arrays.cells[i][j] << std::endl << "****" << std::endl;
  }

  client->namespace_close(ns);
}

[hypertable@nn01 src]$ export LD_LIBRARY_PATH=/opt/hypertable/current/lib/; make candle
[hypertable@nn01 src]$ scp candle cent145:~/src/ht/.
[hypertable@nn01 src]$ scp candle cent146:~/src/ht/.
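
## "make candle" relies on a local Makefile in ~/src; if one is not at hand, a
## rough manual compile under the same layout might look like this (the exact
## library list is an assumption for 0.9.6.1, not verified):
##   g++ -o candle candle.cc -I/opt/hypertable/current/include \
##       -L/opt/hypertable/current/lib -lHypertable -lHyperCommon \
##       -lthrift -levent -llog4cpp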



## prepare the PBS job and launch it (the hypertable thrift broker's IP is 172.16.43.141)

[hypertable@cent146 ~]$ cat myht
#PBS -l nodes=master
#PBS -N ht_job
#PBS -j oe
export LD_LIBRARY_PATH=/opt/hypertable/current/lib:$LD_LIBRARY_PATH
time /usr/lib64/mpich2/bin/mpiexec -f host.hydra -n 2 ~/src/ht/candle 172.16.43.141

[hypertable@cent146 ~]$ scp myht cent145:~/.
[hypertable@cent146 ~]$ qsub myht
90.cent146
[hypertable@cent146 ~]$ qstat -a

cent146: 
                                                                         Req'd  Req'd   Elap
Job ID               Username    Queue    Jobname          SessID NDS   TSK    Memory Time  S Time
-------------------- ----------- -------- ---------------- ------ ----- ------ ------ ----- - -----
90.cent146           hypertable  batch    ht_job             1336     1 master    --  01:00 C 00:00
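
## stdout/stderr are joined (#PBS -j oe) and land back in the submit directory,
## named <jobname>.o<seq>; for this run that would be ~/ht_job.o90:
##   cat ~/ht_job.o90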


APPENDIX-A) build a torque rpm file

## the default torque-*.rpm seems to be built with the r* tools (rcp, rsh, etc.); I need to rebuild the rpm to use ssh/scp instead


[root@cent145 ~]# cd /root/rpmbuild/SOURCES
[root@cent145 SOURCES]# wget http://www.adaptivecomputing.com/resources/downloads/torque/torque-4.1.2.tar.gz
##wget http://www.adaptivecomputing.com/resources/downloads/torque/torque-2.5.7.tar.gz
[root@cent145 SOURCES]# tar -zxvf torque-4.1.2.tar.gz
[root@cent145 SOURCES]# cd torque-4.1.2
[root@cent145 torque-4.1.2]# cp torque.spec ../../SPECS
[root@cent145 torque-4.1.2]# yum install openssl-devel
[root@cent145 SPECS]# cd ../../SPECS
[root@cent145 SPECS]# rpmbuild -bb torque.spec
[root@cent145 SPECS]# cd ../BUILD/torque-4.1.2
[root@cent145 torque-4.1.2]# cat config.status | grep "RSH_PATH"
S["RSH_PATH"]="ssh"
[root@cent145 torque-4.1.2]# cat config.status | grep "RCP"
S["INCLUDE_MOM_RCP_FALSE"]=""
S["INCLUDE_MOM_RCP_TRUE"]="#"
S["RCP_ARGS"]="-rpB"
S["RCP_PATH"]="/usr/bin/scp"
[root@cent145 torque-4.1.2]# cd ../../SPECS
[root@cent145 SPECS]# mkdir -p /var/ftp/torque
[root@cent145 SPECS]# cp ../RPMS/x86_64/torque* /var/ftp/torque
[root@cent145 SPECS]# cp ../RPMS/noarch/torque* /var/ftp/torque

## add a new yum repository (createrepo) for mpich2 and the torque rpms above,
## then install mpich2 and all dependencies with yum on all servers.
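
## a sketch of publishing the freshly built rpms as a yum repo (assumes the
## createrepo package and an ftp server exporting /var/ftp, e.g. 172.16.43.248):
yum install createrepo
createrepo /var/ftp/torque
## clients then point a .repo file at the ftp URL, like the extension.repo at the end of APPENDIX-B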


APPENDIX-B) build a maui rpm file

[root@cent145 ~]# yum install git-core ruby ruby-devel ruby-ext ruby-rdoc
[root@cent145 ~]# wget http://rubyforge.org/frs/download.php/75475/rubygems-1.8.11.tgz
[root@cent145 ~]# tar -zxvf rubygems-1.8.11.tgz
[root@cent145 ~]# cd rubygems-1.8.11;ruby setup.rb
[root@cent145 rubygems-1.8.11]# gem install fpm


##download maui-3.3.1.tar.gz from www.adaptivecomputing.com

[root@cent145 ~]# tar -zxvf maui-3.3.1.tar.gz
[root@cent145 ~]# cd maui-3.3.1
[root@cent145 maui-3.3.1]# ./configure --prefix=/usr/local/maui
[root@cent145 maui-3.3.1]# make
make -C src/moab all
make[1]: Entering directory `/root/maui-3.3.1/src/moab'
gcc -I../../include/ -I/usr/local/maui/include     -I/usr/include/torque -D__LINUX   -D__MPBS        -g -O2 -D__M64  -c MPBSI.c
MPBSI.c:177: error: conflicting types for 'get_svrport'
/usr/include/torque/pbs_ifl.h:686: note: previous declaration of 'get_svrport' was here
MPBSI.c:178: error: conflicting types for 'openrm'
/usr/include/torque/pbs_ifl.h:687: note: previous declaration of 'openrm' was here
make[1]: *** [MPBSI.o] Error 1
make[1]: Leaving directory `/root/maui-3.3.1/src/moab'
make: *** [all] Error 2


## fix the compile error when building the newest maui against this torque

[root@cent145 maui-3.3.1]# find . -name MPBSI.c
./src/moab/MPBSI.c
[root@cent145 maui-3.3.1]# cp ./src/moab/MPBSI.c ./src/moab/MPBSI.c.orig
[root@cent145 maui-3.3.1]# vi ./src/moab/MPBSI.c
[root@cent145 maui-3.3.1]# diff ./src/moab/MPBSI.c.orig ./src/moab/MPBSI.c
177,178c177,178
< extern int get_svrport(const char *,char *,int);
< extern int openrm(char *,int);
---
> extern unsigned int get_svrport(char *,char *,unsigned int);
> extern int openrm(char *,unsigned int);
[root@cent145 maui-3.3.1]# make clean;make


## install into a tmp dir, and prepare for the rpm build

[root@cent145 maui-3.3.1]# sed -i'.bkp' 's/\$(INST_DIR)/\$(DESTDIR)\/\$(INST_DIR)/g' src/*/Makefile
[root@cent145 maui-3.3.1]# sed -i'' 's/\$(MSCHED_HOME)/\$(DESTDIR)\/\$(MSCHED_HOME)/g' src/*/Makefile
[root@cent145 maui-3.3.1]# DESTDIR=/tmp/maui make install
[root@cent145 maui-3.3.1]# ls -l /tmp/maui/
total 4
drwxr-xr-x 3 root root 4096 2012-10-23 22:45 usr

[root@cent145 maui-3.3.1]# mkdir /tmp/maui/etc
[root@cent145 maui-3.3.1]# mkdir /tmp/maui/etc/profile.d
[root@cent145 maui-3.3.1]# mkdir /tmp/maui/etc/init.d
[root@cent145 maui-3.3.1]# cp etc/maui.d /tmp/maui/etc/init.d/
[root@cent145 maui-3.3.1]# cp etc/maui.{csh,sh} /tmp/maui/etc/profile.d/


## edit /tmp/maui/etc/init.d/maui.d at line 12, changing the MAUI_PREFIX setting as follows:

[root@cent145 maui-3.3.1]# vi /tmp/maui/etc/init.d/maui.d 
    :
#MAUI_PREFIX=/opt/maui
MAUI_PREFIX=/usr/local/maui
    :


## add 2 shell scripts 

[root@cent145 maui-3.3.1]# vi /tmp/maui/post-install.sh 
#!/bin/bash
chkconfig --add maui.d
chkconfig --level 3456 maui.d on

[root@cent145 maui-3.3.1]# vi /tmp/maui/pre-uninstall.sh 
#!/bin/bash
chkconfig --del maui.d
[root@cent145 maui-3.3.1]# chmod 755 /tmp/maui/post-install.sh 
[root@cent145 maui-3.3.1]# chmod 755 /tmp/maui/pre-uninstall.sh 


## rpm build by fpm

[root@cent145 maui-3.3.1]# fpm -s dir -t rpm -n maui -v 3.3.1 -C /tmp/maui \
> -p /tmp/maui-3.3.1-x86_64-fpmbuild.rpm --post-install /tmp/maui/post-install.sh \
> --pre-uninstall /tmp/maui/pre-uninstall.sh etc usr
[root@cent145 maui-3.3.1]# ls -l /tmp/*.rpm
-rw-r--r-- 1 root root 42178761 2012-10-23 22:55 /tmp/maui-3.3.1-x86_64-fpmbuild.rpm
[root@cent145 maui-3.3.1]# rpm -qpl /tmp/maui-3.3.1-x86_64-fpmbuild.rpm 
/etc/init.d/maui.d
/etc/profile.d/maui.csh
/etc/profile.d/maui.sh
/usr/local/maui/bin/canceljob
/usr/local/maui/bin/changeparam
/usr/local/maui/bin/checkjob
/usr/local/maui/bin/checknode
/usr/local/maui/bin/diagnose
/usr/local/maui/bin/mbal
/usr/local/maui/bin/mclient
/usr/local/maui/bin/mdiag
/usr/local/maui/bin/mjobctl
/usr/local/maui/bin/mnodectl
/usr/local/maui/bin/mprof
/usr/local/maui/bin/mschedctl
/usr/local/maui/bin/mstat
/usr/local/maui/bin/releasehold
/usr/local/maui/bin/releaseres
/usr/local/maui/bin/resetstats
/usr/local/maui/bin/runjob
/usr/local/maui/bin/schedctl
/usr/local/maui/bin/sethold
/usr/local/maui/bin/setqos
/usr/local/maui/bin/setres
/usr/local/maui/bin/setspri
/usr/local/maui/bin/showbf
/usr/local/maui/bin/showconfig
/usr/local/maui/bin/showgrid
/usr/local/maui/bin/showhold
/usr/local/maui/bin/showq
/usr/local/maui/bin/showres
/usr/local/maui/bin/showstart
/usr/local/maui/bin/showstate
/usr/local/maui/bin/showstats
/usr/local/maui/include/moab.h
/usr/local/maui/lib/libmcom.a
/usr/local/maui/lib/libmoab.a
/usr/local/maui/log
/usr/local/maui/maui-private.cfg
/usr/local/maui/maui.cfg
/usr/local/maui/sbin/maui
/usr/local/maui/spool
/usr/local/maui/stats
/usr/local/maui/tools
/usr/local/maui/traces
[root@cent145 maui-3.3.1]# rpm -qpi /tmp/maui-3.3.1-x86_64-fpmbuild.rpm 
Name        : maui                         Relocations: / 
Version     : 3.3.1                             Vendor: root@cent145
Release     : 1                             Build Date: 2012-10-23 22:55 (Tue)
Install Date: (not installed)               Build Host: cent145
Group       : default                       Source RPM: maui-3.3.1-1.src.rpm
Size        : 102654035                        License: unknown
Signature   : (none)
Packager    : <root@cent145>
URL         : http://example.com/no-uri-given
Summary     : no description given
Description :
no description given


## the rpm file can be installed locally with

[root@cent145 maui-3.3.1]# yum localinstall /tmp/maui-3.3.1-x86_64-fpmbuild.rpm

## or update the yum client's repo settings in /etc/yum.repos.d/extension.repo

[mpich2]
name=CentOS-$releasever - mpich2
baseurl=ftp://172.16.43.248/mpich2
gpgcheck=0
[torque]
name=CentOS-$releasever - torque
baseurl=ftp://172.16.43.248/torque
gpgcheck=0
[maui]
name=CentOS-$releasever - maui
baseurl=ftp://172.16.43.248/maui
gpgcheck=0
[root@cent145 maui-3.3.1]# yum install maui
[root@cent145 tmp]# chkconfig --list | grep maui
maui.d          0:??  1:??  2:??  3:??  4:??  5:??  6:??

reference: http://blog.ajdecon.org/installing-the-maui-scheduler-with-torque-410/

