2017年8月19日土曜日

Vagrantを使用してH2 databaseがインストールされた仮想マシンを構築する

以下のVagrantfileを使用して、h2 databaseがインストールされた仮想マシンを構築できます。Webコンソールをサービスとして登録しているので、仮想マシン起動時に自動的にコンソールも利用可能になります。 Vagrantfile

# Vagrantfile that builds a CentOS 7.3 VM with the H2 database installed and
# its web console registered as a systemd service (see h2.service / h2 below).
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "bento/centos-7.3"
  config.vm.hostname = "h2db"
  config.vm.provider :virtualbox do |vbox|
     vbox.name = "h2db"
     vbox.cpus = 4
     vbox.memory = 4096
     vbox.customize ["modifyvm", :id, "--nicpromisc2","allow-all"]
  end
  # private network
  config.vm.network "private_network", ip: "192.168.55.86", :netmask => "255.255.255.0"
  # bridge network
  config.vm.network "public_network", ip: "192.168.1.86", :netmask => "255.255.255.0"
  config.vm.network "forwarded_port", guest:22, host:19022, id:"ssh"
  config.vm.provision "shell", inline: <<-SHELL
yum -y install unzip

# install JDK
yum -y install java-1.8.0-openjdk

# download h2 database
export h2db=h2-2017-06-10.zip
wget http://www.h2database.com/$h2db

# install h2 database (the zip unpacks into a directory named "h2")
unzip $h2db
# allow web-console connections from hosts other than localhost
echo 'webAllowOthers = true' > ~root/.h2.server.properties
mv h2 /opt

# setup console as service
cp /vagrant/h2 /etc/sysconfig
cp /vagrant/h2.service /etc/systemd/system
systemctl enable h2.service
systemctl start h2.service

echo 'access URL: http://192.168.55.86:8082/'
echo 'username: sa    default password: sa'
# BUG FIX: the closing quote was missing here, which made the shell treat the
# following line as part of this string literal.
echo 'Driver class: org.h2.Driver'
echo 'JDBC URL: jdbc:h2:tcp://192.168.55.86/~/test'

SHELL
end
h2.service

# systemd unit that runs the H2 web/TCP console as a foreground process.
[Unit]
Description=H2 database
After=syslog.target network.target

[Service]
Type=simple
# provides H2CUSTOMJARS (extra jars appended to the classpath below)
EnvironmentFile=/etc/sysconfig/h2
WorkingDirectory=/opt/h2
# NOTE(review): the jar name must match the H2 zip installed by the
# Vagrantfile (h2-2017-06-10.zip) — confirm it unpacks bin/h2-1.4.196.jar
ExecStart=/bin/java -cp "/opt/h2/bin/h2-1.4.196.jar:$H2CUSTOMJARS" org.h2.tools.Console
# send SIGQUIT (-3) to the main process on stop; ${MAINPID} is set by systemd
ExecStop=/bin/kill -3 ${MAINPID}

[Install]
WantedBy=multi-user.target
h2

H2CUSTOMJARS=

Vagrantを使用して、Kerberosサーバを構築する

以下のVagrantfileを使用して、Kerberosサーバを構築できます。 Vagrantfile

# Vagrantfile that builds a single-node MIT Kerberos KDC for realm VM.INTERNAL.
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "bento/centos-7.3"
  config.vm.hostname = "krb5server.vm.internal"
  config.vm.provider :virtualbox do |vbox|
     vbox.name = "krb5server"
     vbox.cpus = 4
     vbox.memory = 8192
     vbox.customize ["modifyvm", :id, "--nicpromisc2","allow-all"]
  end
  # private network
  config.vm.network "private_network", ip: "192.168.55.87", :netmask => "255.255.255.0"
  # bridge network
  config.vm.network "public_network", ip: "192.168.1.87", :netmask => "255.255.255.0"
  config.vm.network "forwarded_port", guest:22, host:18022, id:"ssh"
  config.vm.provision "shell", inline: <<-SHELL

# install haveged (extra entropy; speeds up KDC database key generation)
yum -y install epel-release
yum -y install haveged
systemctl enable haveged.service
systemctl start haveged.service

# install Kerberos server and client packages
yum -y install krb5-server krb5-workstation pam_krb5

# chrony: serve time to both VM networks
# (Kerberos requires client and server clocks to be closely in sync)
echo 'allow 192.168.1/24' >> /etc/chrony.conf
echo 'allow 192.168.55/24' >> /etc/chrony.conf

systemctl enable chronyd.service
systemctl start chronyd.service

# configure kdc.conf / krb5.conf: switch the stock EXAMPLE.COM realm
# to VM.INTERNAL
sed -i -e 's/EXAMPLE.COM/VM.INTERNAL/g' /var/kerberos/krb5kdc/kdc.conf

# create the KDC database (-s: write a stash file, -P: master password)
kdb5_util create -r VM.INTERNAL -s -P admin

sed -i -e 's/# default_realm = EXAMPLE.COM/default_realm = VM.INTERNAL/' /etc/krb5.conf
sed -i -e 's/ default_ccache_name/#default_ccache_name/' /etc/krb5.conf
# comment out the stock [realms]/[domain_realm] sections; our own versions
# are appended below
sed -i -e 's/\\[realms\\]/#[realms]/' /etc/krb5.conf
sed -i -e 's/\\[domain_realm\\]/#[domain_realm]/' /etc/krb5.conf

echo '' >> /etc/krb5.conf
echo '[realms]' >> /etc/krb5.conf
echo 'VM.INTERNAL = {' >> /etc/krb5.conf
echo '  kdc = krb5server.vm.internal' >> /etc/krb5.conf
echo '  admin_server = krb5server.vm.internal' >> /etc/krb5.conf
echo '}' >> /etc/krb5.conf
echo '' >> /etc/krb5.conf
echo '[domain_realm]' >> /etc/krb5.conf
echo '.vm.internal = VM.INTERNAL' >> /etc/krb5.conf
echo 'vm.internal = VM.INTERNAL' >> /etc/krb5.conf

# comment out the stock ACL and grant all */admin principals full rights
sed -i -e 's/^/#/' /var/kerberos/krb5kdc/kadm5.acl
echo '*/admin@VM.INTERNAL *' >> /var/kerberos/krb5kdc/kadm5.acl

# create the admin principal (password: admin)
kadmin.local addprinc -pw "admin" root/admin

systemctl enable krb5kdc
systemctl start krb5kdc
systemctl enable kadmin
systemctl start kadmin

# add a host principal for this KDC and store its key in the keytab
kadmin.local addprinc -randkey host/krb5server.vm.internal
kadmin.local ktadd host/krb5server.vm.internal

# add a test user principal (password: test)
useradd test
kadmin -p root/admin -w admin addprinc -pw test test
#kadmin.local ktadd  -norandkey -k /etc/krb5.keytab test
kadmin.local ktadd  -norandkey test
# also export the key to a standalone keytab for use from other hosts
kadmin.local xst -norandkey -k test.keytab test@VM.INTERNAL


# sshd/ssh: enable Kerberos/GSSAPI authentication
echo 'KerberosAuthentication yes' >> /etc/ssh/sshd_config
sed -i -e 's/GSSAPIAuthentication no/GSSAPIAuthentication yes/' /etc/ssh/sshd_config
sed -i -e 's/GSSAPICleanupCredentials no/GSSAPICleanupCredentials yes/' /etc/ssh/sshd_config

echo 'Host *.vm.internal' >> /etc/ssh/ssh_config
echo '  GSSAPIAuthentication yes' >> /etc/ssh/ssh_config
echo '  GSSAPIDelegateCredentials yes' >> /etc/ssh/ssh_config
# wire Kerberos into the PAM stack
authconfig --enablekrb5 --update
systemctl restart sshd


SHELL
end

2017年8月13日日曜日

Apache NiFiがインストールされた仮想マシンをVagrantで作成する

Apache NiFiがインストールされた仮想マシンをVagrantで作成するには、以下のVagrantfile, nifi.serviceを同一フォルダに配置してvagrant upコマンドを実行します。仮想マシンのビルド完了後、http://192.168.1.88:8080/nifi/でApache NiFiの画面にアクセスできます。 Vagrantfile

# Vagrantfile that builds a CentOS 7.3 VM running Apache NiFi 1.3.0 as a
# systemd service (see nifi.service below).
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "bento/centos-7.3"
  config.vm.hostname = "nifi"
  config.vm.provider :virtualbox do |vbox|
     vbox.name = "nifi"
     vbox.cpus = 4
     vbox.memory = 8192
     vbox.customize ["modifyvm", :id, "--nicpromisc2","allow-all"]
  end
  # private network
  config.vm.network "private_network", ip: "192.168.55.88", :netmask => "255.255.255.0"
  # bridge network
  config.vm.network "public_network", ip: "192.168.1.88", :netmask => "255.255.255.0"
  config.vm.network "forwarded_port", guest:22, host:18022, id:"ssh"
  config.vm.provision "shell", inline: <<-SHELL
# open port 8080 (NiFi web UI)
firewall-cmd --permanent --add-port=8080/tcp
# BUG FIX: --permanent alone only updates the saved configuration; reload so
# the rule takes effect on the running firewall without a reboot
firewall-cmd --reload

# maximum file handles & maximum forked processes
echo '*  hard  nofile  50000' >> /etc/security/limits.conf
echo '*  soft  nofile  50000' >> /etc/security/limits.conf
echo '*  hard  nproc  10000' >> /etc/security/limits.conf
echo '*  soft  nproc  10000' >> /etc/security/limits.conf

# BUG FIX: pam_limits only reads files ending in ".conf" from limits.d, so
# the original target "90-nproc" (no extension) was silently ignored
echo '*  soft  nproc  10000' >> /etc/security/limits.d/90-nproc.conf

# install Java
yum -y install java-1.8.0-openjdk

# download Apache NiFi
wget http://ftp.riken.jp/net/apache/nifi/1.3.0/nifi-1.3.0-bin.tar.gz
tar xvfz nifi-1.3.0-bin.tar.gz
mv nifi-1.3.0 /opt

# register NiFi as a systemd service
cp /vagrant/nifi.service /etc/systemd/system
systemctl enable nifi.service
systemctl start nifi.service

echo 'access url -> http://192.168.55.88:8080/nifi/'

SHELL
end
nifi.service

# systemd unit for Apache NiFi, managed through its bundled nifi.sh script.
[Unit]
Description=Apache Nifi
After=syslog.target network.target

[Service]
# nifi.sh start daemonizes, so the service uses the forking type
Type=forking
ExecStart=/opt/nifi-1.3.0/bin/nifi.sh start
ExecStop=/opt/nifi-1.3.0/bin/nifi.sh stop
# let nifi.sh stop perform the shutdown instead of systemd killing the cgroup
KillMode=none

[Install]
WantedBy=multi-user.target

2017年8月6日日曜日

VagrantとAmbari blueprintでSpark2の1ノードクラスタを構築する

以下のVagrantfileで、mysql, Ambari Server, Spark2などがインストールされた1ノードクラスタを構築できます。 Vagrantfile

# Vagrantfile that builds a one-node HDP cluster (MySQL, Ambari, Hive, Spark2)
# via an Ambari blueprint (cluster_configuration.json / hostmapping.json).
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "bento/centos-7.3"
  config.vm.hostname = "min-spark"
  config.vm.provider :virtualbox do |vbox|
     vbox.name = "min-spark"
     vbox.cpus = 4
     vbox.memory = 12288
     vbox.customize ["modifyvm", :id, "--nicpromisc2","allow-all"]
  end
  # private network
  config.vm.network "private_network", ip: "192.168.55.20", :netmask => "255.255.255.0"
  # bridge network
  config.vm.network "public_network", ip: "192.168.1.20", :netmask => "255.255.255.0"
  config.vm.network "forwarded_port", guest:22, host:10022, id:"ssh"
  config.vm.provision "shell", inline: <<-SHELL
# disable firewalld
systemctl stop firewalld
systemctl disable firewalld

# install MySQL 5.7 (community), replacing mariadb
sudo yum -y remove mariadb-libs
yum -y localinstall http://dev.mysql.com/get/mysql57-community-release-el7-7.noarch.rpm
yum -y install mysql mysql-devel mysql-server mysql-utilities
sudo systemctl enable mysqld.service
sudo systemctl start mysqld.service

# change the generated root password, create users and databases
chkconfig mysqld on
service mysqld start
export MYSQL_ROOTPWD='Root123#'
# mysqld 5.7 writes a temporary root password into the log on first start
export MYSQL_PWD=`cat /var/log/mysqld.log | awk '/temporary password/ {print $NF}'`
mysql -uroot --connect-expired-password -e "SET PASSWORD = PASSWORD('$MYSQL_ROOTPWD');"
export MYSQL_PWD=$MYSQL_ROOTPWD
export MYSQL_ROOTPWD='root'
# drop the password-strength plugin so a simple password can be set
mysql -uroot --connect-expired-password -e "UNINSTALL PLUGIN validate_password;"
mysql -uroot --connect-expired-password -e "SET PASSWORD = PASSWORD('$MYSQL_ROOTPWD');"
export MYSQL_PWD=$MYSQL_ROOTPWD
mysql -uroot --connect-expired-password -e "CREATE DATABASE ambari DEFAULT CHARACTER SET utf8;"
mysql -uroot --connect-expired-password -e "CREATE USER ambari@localhost IDENTIFIED BY 'bigdata';"
mysql -uroot --connect-expired-password -e "GRANT ALL PRIVILEGES ON ambari.* TO 'ambari'@'%' IDENTIFIED BY 'bigdata';"

mysql -uroot --connect-expired-password -e "CREATE DATABASE hive DEFAULT CHARACTER SET utf8;"
mysql -uroot --connect-expired-password -e "CREATE USER hive@localhost IDENTIFIED BY 'hive';"
mysql -uroot --connect-expired-password -e "GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%' IDENTIFIED BY 'hive';"

sudo systemctl stop mysqld.service
sudo cp /vagrant/my.cnf /etc
ln -s /var/lib/mysql/mysql.sock /tmp/mysql.sock
sudo systemctl start mysqld.service

# install the MySQL JDBC driver
yum -y install mysql-connector-java

# install Ambari server and agent
cd /etc/yum.repos.d/
wget http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.5.1.0/ambari.repo
yum -y install ambari-server ambari-agent


# workaround for AMBARI-20532: write the JDBC properties directly
echo '' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.database=mysql' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.database_name=ambari' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.user.name=ambari' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.user.password=/etc/ambari-server/conf/password.dat' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.driver=/usr/share/java/mysql-connector-java.jar' >> /etc/ambari-server/conf/ambari.properties
echo 'custom.jdbc.name=mysql-connector-java.jar' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.hostname=localhost' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.port=3306' >> /etc/ambari-server/conf/ambari.properties
ambari-server setup -s --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar -v
ambari-server setup --silent

mysql -u ambari -pbigdata ambari < /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql

ambari-server start
ambari-agent start

# build the one-node cluster from the blueprint
curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://localhost:8080/api/v1/blueprints/min-spark -d @/vagrant/cluster_configuration.json

curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://localhost:8080/api/v1/clusters/min-spark -d @/vagrant/hostmapping.json
sleep 30

# poll request 1 until cluster provisioning reports 100% progress
Progress=`curl -s --user admin:admin -X GET http://localhost:8080/api/v1/clusters/min-spark/requests/1 | grep progress_percent | awk '{print $3}' | cut -d . -f 1`
while [[ `echo $Progress | grep -v 100` ]]; do
  Progress=`curl -s --user admin:admin -X GET http://localhost:8080/api/v1/clusters/min-spark/requests/1 | grep progress_percent | awk '{print $3}' | cut -d . -f 1`
  echo " Progress: $Progress%"
  sleep 30
done

# create the HDFS home directory for the admin user
# (CONSISTENCY FIX: use /usr/bin/hdfs like the other provisioning scripts)
sudo -u hdfs /usr/bin/hdfs dfs -mkdir /user/admin
sudo -u hdfs /usr/bin/hdfs dfs -chown admin /user/admin

# add a test user
useradd test
echo test | passwd test --stdin

sudo -u hdfs /usr/bin/hdfs dfs -mkdir /user/test
sudo -u hdfs /usr/bin/hdfs dfs -chown test /user/test

# connect to the Spark2 thrift server like this:
#beeline
#!connect jdbc:hive2://localhost:10016 test


SHELL
end
cluster_configuration.json

{
  "configurations" : [
    {
      "hive-site": {
        "hive.support.concurrency": "true",
        "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
        "hive.compactor.initiator.on": "true",
        "hive.compactor.worker.threads": "5",
        "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
        "javax.jdo.option.ConnectionPassword": "hive",
        "javax.jdo.option.ConnectionURL": "jdbc:mysql://localhost/hive",
        "javax.jdo.option.ConnectionUserName": "hive"
      }
    },
    {
      "hive-env": {
        "hive_ambari_database": "MySQL",
        "hive_database": "Existing MySQL Database",
        "hive_database_type": "mysql",
        "hive_database_name": "hive"
      }
    },
    {
      "core-site": {
        "properties" : {
          "hadoop.proxyuser.root.groups" : "*",
          "hadoop.proxyuser.root.hosts" : "*"
        }
      }
    }
  ],
  "host_groups" : [
    {
      "name" : "host_group_1",
      "components" : [
        {
          "name" : "NAMENODE"
        },
        {
          "name" : "SECONDARY_NAMENODE"
        },
        {
          "name" : "DATANODE"
        },
        {
          "name" : "HDFS_CLIENT"
        },
        {
          "name" : "RESOURCEMANAGER"
        },
        {
          "name" : "NODEMANAGER"
        },
        {
          "name" : "YARN_CLIENT"
        },
        {
          "name" : "HISTORYSERVER"
        },
        {
          "name" : "APP_TIMELINE_SERVER"
        },
        {
          "name" : "ZOOKEEPER_SERVER"
        },
        {
          "name" : "ZOOKEEPER_CLIENT"
        },
        {
          "name" : "METRICS_MONITOR"
        },
        {
          "name" : "TEZ_CLIENT"
        },
        {
          "name" : "HIVE_SERVER"
        },
        {
          "name" : "HIVE_METASTORE"
        },
        {
          "name" : "METRICS_COLLECTOR"
        },
        {
          "name" : "WEBHCAT_SERVER"
        },
        {
          "name" : "PIG"
        },
        {
          "name" : "SLIDER"
        },
        {
          "name" : "SPARK2_JOBHISTORYSERVER"
        },
        {
          "name" : "SPARK2_CLIENT"
        },
        {
          "name": "SPARK2_THRIFTSERVER"
        },
        {
          "name": "LIVY2_SERVER"
        }
      ],
      "cardinality" : "1"
    }
  ],
  "settings" : [{
     "recovery_settings" : [{
       "recovery_enabled" : "true"
    }]
  }],
  "Blueprints" : {
    "blueprint_name" : "min-spark",
    "stack_name" : "HDP",
    "stack_version" : "2.6"
  }
}
hostmapping.json

{
  "blueprint" : "min-spark",
  "default_password" : "admin",
  "provision_action" : "INSTALL_AND_START",
  "host_groups" :[
    {
      "name" : "host_group_1",
      "hosts" : [
        {
          "fqdn" : "min-spark"
        }
      ]
    }
  ]
}
my.cnf

# MySQL settings tuned for a disposable single-node Ambari/Hive VM.
[client]
port            = 3306
socket          = /var/lib/mysql/mysql.sock
default-character-set=utf8

[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
# listen on all interfaces so other hosts/VMs can connect
bind-address = 0.0.0.0
port            = 3306
key_buffer_size = 256M
max_allowed_packet = 16M
table_open_cache = 16
innodb_buffer_pool_size = 512M
innodb_log_file_size = 32M
sort_buffer_size = 8M
read_buffer_size = 8M
read_rnd_buffer_size = 8M
join_buffer_size = 8M
thread_stack = 4M
character-set-server=utf8
# store table names in lowercase / compare case-insensitively
lower_case_table_names = 1
innodb_lock_wait_timeout=120
# NOTE(review): doublewrite disabled for speed; risks torn pages on crash —
# acceptable only because this VM is throwaway
skip-innodb-doublewrite

[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid

2017年8月3日木曜日

VagrantとAmbari blueprintでhiveの1ノードクラスタを作成する

以下のVagrantfileで、mysql, Ambari Server, Hiveなどがインストールされた1ノードクラスタを構築できます。

# Vagrantfile that builds a one-node HDP cluster (MySQL, Ambari, Hive) via an
# Ambari blueprint (cluster_configuration.json / hostmapping.json).
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "bento/centos-7.3"
  config.vm.hostname = "min-hive"
  config.vm.provider :virtualbox do |vbox|
     vbox.name = "min-hive"
     vbox.cpus = 4
     vbox.memory = 12288 
     vbox.customize ["modifyvm", :id, "--nicpromisc2","allow-all"]
  end
  # private network
  config.vm.network "private_network", ip: "192.168.55.20", :netmask => "255.255.255.0"
  # bridge network
  config.vm.network "public_network", ip: "192.168.1.20", :netmask => "255.255.255.0"
  config.vm.network "forwarded_port", guest:22, host:10022, id:"ssh"
  config.vm.provision "shell", inline: <<-SHELL
# disable firewalld
systemctl stop firewalld
systemctl disable firewalld

# install MySQL 5.7 (community), replacing mariadb
sudo yum -y remove mariadb-libs
yum -y localinstall http://dev.mysql.com/get/mysql57-community-release-el7-7.noarch.rpm
yum -y install mysql mysql-devel mysql-server mysql-utilities
sudo systemctl enable mysqld.service
sudo systemctl start mysqld.service

# change the generated root password, create users and databases
chkconfig mysqld on
service mysqld start
export MYSQL_ROOTPWD='Root123#'
# mysqld 5.7 writes a temporary root password into the log on first start
export MYSQL_PWD=`cat /var/log/mysqld.log | awk '/temporary password/ {print $NF}'`
mysql -uroot --connect-expired-password -e "SET PASSWORD = PASSWORD('$MYSQL_ROOTPWD');"
export MYSQL_PWD=$MYSQL_ROOTPWD
export MYSQL_ROOTPWD='root'
# drop the password-strength plugin so a simple password can be set
mysql -uroot --connect-expired-password -e "UNINSTALL PLUGIN validate_password;"
mysql -uroot --connect-expired-password -e "SET PASSWORD = PASSWORD('$MYSQL_ROOTPWD');"
export MYSQL_PWD=$MYSQL_ROOTPWD
mysql -uroot --connect-expired-password -e "CREATE DATABASE ambari DEFAULT CHARACTER SET utf8;"
mysql -uroot --connect-expired-password -e "CREATE USER ambari@localhost IDENTIFIED BY 'bigdata';"
mysql -uroot --connect-expired-password -e "GRANT ALL PRIVILEGES ON ambari.* TO 'ambari'@'%' IDENTIFIED BY 'bigdata';"

mysql -uroot --connect-expired-password -e "CREATE DATABASE hive DEFAULT CHARACTER SET utf8;"
mysql -uroot --connect-expired-password -e "CREATE USER hive@localhost IDENTIFIED BY 'hive';"
mysql -uroot --connect-expired-password -e "GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%' IDENTIFIED BY 'hive';"

sudo systemctl stop mysqld.service
sudo cp /vagrant/my.cnf /etc
ln -s /var/lib/mysql/mysql.sock /tmp/mysql.sock
sudo systemctl start mysqld.service

# install the MySQL JDBC driver
yum -y install mysql-connector-java

# install Ambari server and agent
cd /etc/yum.repos.d/
wget http://public-repo-1.hortonworks.com/ambari/centos7/2.x/updates/2.5.1.0/ambari.repo
yum -y install ambari-server ambari-agent


# workaround for AMBARI-20532: write the JDBC properties directly
echo '' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.database=mysql' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.database_name=ambari' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.user.name=ambari' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.user.password=/etc/ambari-server/conf/password.dat' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.driver=/usr/share/java/mysql-connector-java.jar' >> /etc/ambari-server/conf/ambari.properties
echo 'custom.jdbc.name=mysql-connector-java.jar' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.hostname=localhost' >> /etc/ambari-server/conf/ambari.properties
echo 'server.jdbc.port=3306' >> /etc/ambari-server/conf/ambari.properties
ambari-server setup -s --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar -v
ambari-server setup --silent

mysql -u ambari -pbigdata ambari < /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql

ambari-server start
ambari-agent start

# build the one-node cluster from the blueprint
curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://localhost:8080/api/v1/blueprints/min-hive -d @/vagrant/cluster_configuration.json

curl -H "X-Requested-By: ambari" -X POST -u admin:admin http://localhost:8080/api/v1/clusters/min-hive -d @/vagrant/hostmapping.json
sleep 30

# poll request 1 until cluster provisioning reports 100% progress
Progress=`curl -s --user admin:admin -X GET http://localhost:8080/api/v1/clusters/min-hive/requests/1 | grep progress_percent | awk '{print $3}' | cut -d . -f 1`
while [[ `echo $Progress | grep -v 100` ]]; do
  Progress=`curl -s --user admin:admin -X GET http://localhost:8080/api/v1/clusters/min-hive/requests/1 | grep progress_percent | awk '{print $3}' | cut -d . -f 1`
  echo " Progress: $Progress%"
  sleep 30
done

# create the HDFS home directory for the admin user
sudo -u hdfs /usr/bin/hdfs dfs -mkdir /user/admin
sudo -u hdfs /usr/bin/hdfs dfs -chown admin /user/admin

# add a test user
useradd test
echo test | passwd test --stdin

sudo -u hdfs /usr/bin/hdfs dfs -mkdir /user/test
sudo -u hdfs /usr/bin/hdfs dfs -chown test /user/test

SHELL
end
cluster_configuration.json

{
  "configurations" : [
    {
      "hive-site": {
        "hive.support.concurrency": "true",
        "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
        "hive.compactor.initiator.on": "true",
        "hive.compactor.worker.threads": "5",
        "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
        "javax.jdo.option.ConnectionPassword": "hive",
        "javax.jdo.option.ConnectionURL": "jdbc:mysql://localhost/hive",
        "javax.jdo.option.ConnectionUserName": "hive"
      }
    },
    {
      "hive-env": {
        "hive_ambari_database": "MySQL",
        "hive_database": "Existing MySQL Database",
        "hive_database_type": "mysql",
        "hive_database_name": "hive"
      }
    },
    {
      "core-site": {
        "properties" : {
          "hadoop.proxyuser.root.groups" : "*",
          "hadoop.proxyuser.root.hosts" : "*"
        }
      }
    }
  ],
  "host_groups" : [
    {
      "name" : "host_group_1",
      "components" : [
        {
          "name" : "NAMENODE"
        },
        {
          "name" : "SECONDARY_NAMENODE"
        },
        {
          "name" : "DATANODE"
        },
        {
          "name" : "HDFS_CLIENT"
        },
        {
          "name" : "RESOURCEMANAGER"
        },
        {
          "name" : "NODEMANAGER"
        },
        {
          "name" : "YARN_CLIENT"
        },
        {
          "name" : "HISTORYSERVER"
        },
        {
          "name" : "APP_TIMELINE_SERVER"
        },
        {
          "name" : "ZOOKEEPER_SERVER"
        },
        {
          "name" : "ZOOKEEPER_CLIENT"
        },
        {
          "name" : "METRICS_MONITOR"
        },
        {
          "name" : "TEZ_CLIENT"
        },
        {
          "name" : "HIVE_SERVER"
        },
        {
          "name" : "HIVE_METASTORE"
        },
        {
          "name" : "METRICS_COLLECTOR"
        },
        {
          "name" : "WEBHCAT_SERVER"
        }
      ],
      "cardinality" : "1"
    }
  ],
  "settings" : [{
     "recovery_settings" : [{
       "recovery_enabled" : "true"
    }]
  }],
  "Blueprints" : {
    "blueprint_name" : "min-hive",
    "stack_name" : "HDP",
    "stack_version" : "2.6"
  }
}
hostmapping.json

{
  "blueprint" : "min-hive",
  "default_password" : "admin",
  "provision_action" : "INSTALL_AND_START",
  "host_groups" :[
    {
      "name" : "host_group_1",
      "hosts" : [
        {
          "fqdn" : "min-hive"
        }
      ]
    }
  ]
}
my.cnf

# MySQL settings tuned for a disposable single-node Ambari/Hive VM.
[client]
port            = 3306
socket          = /var/lib/mysql/mysql.sock
default-character-set=utf8

[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
# listen on all interfaces so other hosts/VMs can connect
bind-address = 0.0.0.0
port            = 3306
key_buffer_size = 256M
max_allowed_packet = 16M
table_open_cache = 16
innodb_buffer_pool_size = 512M
innodb_log_file_size = 32M
sort_buffer_size = 8M
read_buffer_size = 8M
read_rnd_buffer_size = 8M
join_buffer_size = 8M
thread_stack = 4M
character-set-server=utf8
# store table names in lowercase / compare case-insensitively
lower_case_table_names = 1
innodb_lock_wait_timeout=120
# NOTE(review): doublewrite disabled for speed; risks torn pages on crash —
# acceptable only because this VM is throwaway
skip-innodb-doublewrite

[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid