
Deploying Hadoop HA on openEuler Linux

Upgrade the operating system and software

yum -y update

A reboot is recommended after the upgrade.

Install common tools

yum -y install gcc gcc-c++ autoconf automake cmake make rsync vim man zip unzip net-tools zlib zlib-devel openssl openssl-devel pcre-devel tcpdump lrzsz tar wget

Change the hostname

hostnamectl set-hostname spark01
or
vim /etc/hostname 
spark01
reboot
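
The rest of this guide assumes a three-node cluster named spark01, spark02, and spark03; a minimal sketch of naming each host (run the matching command on the corresponding node):

# on the first node
hostnamectl set-hostname spark01
# on the second node
hostnamectl set-hostname spark02
# on the third node
hostnamectl set-hostname spark03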

Change the IP address

vim /etc/sysconfig/network-scripts/ifcfg-ens160

Example NIC configuration file

TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=ens160
UUID=943779e9-249c-44bb-b272-d49ea5831ed4
DEVICE=ens160
ONBOOT=yes
IPADDR=192.168.28.11
PREFIX=24
GATEWAY=192.168.28.2
DNS1=192.168.28.2

After saving, reactivate the connection so the new settings take effect:
nmcli con up ens160
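
A quick check that the address and gateway from the sample file above are in effect:

ip addr show ens160      # should list 192.168.28.11/24
ping -c 3 192.168.28.2   # the gateway should reply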

Disable the firewall and SELinux

systemctl stop firewalld
systemctl disable firewalld
vim /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
# SELINUX=enforcing
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
# SELINUXTYPE=targeted 

SELINUX=disabled

Then run the following command so the change takes effect without a reboot:

setenforce 0

Or, without editing the file by hand:

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
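
To confirm SELinux is no longer enforcing:

getenforce   # should print Permissive now, and Disabled after the next reboot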

Create the software installation directory, upload the packages, and configure environment variables

mkdir -p /opt/soft
cd /opt/soft
# upload the JDK, Hadoop, and ZooKeeper packages here
tar -zxvf jdk-8u361-linux-x64.tar.gz
mv jdk1.8.0_361 jdk8
tar -zxvf hadoop-3.3.5.tar.gz
mv hadoop-3.3.5 hadoop3

vim /etc/profile
	
export JAVA_HOME=/opt/soft/jdk8
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin

export HADOOP_HOME=/opt/soft/hadoop3

export HADOOP_INSTALL=${HADOOP_HOME}
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export PATH=${PATH}:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop

export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

After editing, run source so that /etc/profile takes effect:
source /etc/profile
Check the environment variables:
printenv
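
A few targeted checks are usually more useful than scanning the full printenv output (assuming the paths configured above):

java -version            # should report JDK 1.8.0_361
hadoop version           # should report Hadoop 3.3.5
echo $HADOOP_CONF_DIR    # should print /opt/soft/hadoop3/etc/hadoop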

Edit the hostname mappings

vim /etc/hosts
192.168.28.11 spark01
192.168.28.12 spark02
192.168.28.13 spark03

A reboot is recommended after the change.
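
Once the mappings are in place on every node, a short connectivity check (a sketch, assuming all three hosts are up):

for h in spark01 spark02 spark03; do ping -c 1 $h; done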

Modify the Hadoop configuration files, which are located in the etc/hadoop directory under the extracted Hadoop tree

cd /opt/soft/hadoop3/etc/hadoop

Edit the following configuration files:

  • hadoop-env.sh
  • core-site.xml
  • hdfs-site.xml
  • workers
  • mapred-site.xml
  • yarn-site.xml

Append the following to the end of hadoop-env.sh:

export JAVA_HOME=/opt/soft/jdk8
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_ZKFC_USER=root
export HDFS_JOURNALNODE_USER=root

export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

core-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://puegg</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/hadoop_data</value>
    </property>
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <!-- ZooKeeper quorum addresses and ports used by HDFS HA -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>spark01:2181,spark02:2181,spark03:2181</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
</configuration>

hdfs-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- Logical name (nameservice ID) of the HDFS cluster -->
    <property>
        <name>dfs.nameservices</name>
        <value>puegg</value>
    </property>
    <!-- IDs of the NameNodes within the nameservice -->
    <property>
        <name>dfs.ha.namenodes.puegg</name>
        <value>nn1,nn2</value>
    </property>
    <!-- Hostname and RPC port of each NameNode -->
    <property>
        <name>dfs.namenode.rpc-address.puegg.nn1</name>
        <value>spark01:8020</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-address.puegg.nn2</name>
        <value>spark02:8020</value>
    </property>
    <!-- Hostname and HTTP port of each NameNode -->
    <property>
        <name>dfs.namenode.http-address.puegg.nn1</name>
        <value>spark01:9870</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.puegg.nn2</name>
        <value>spark02:9870</value>
    </property>
    <!-- Shared edits URL (JournalNode quorum) -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://spark01:8485;spark02:8485;spark03:8485/puegg</value>
    </property>
    <!-- Proxy provider that lets HDFS clients locate the active NameNode -->
    <property>
        <name>dfs.client.failover.proxy.provider.puegg</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- Fencing method used when the two NameNodes switch roles -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence</value>
    </property>
    <!-- SSH private key used by the fencing method -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>

    <!-- Directory where the JournalNodes store edits files -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/opt/journalnode/data</value>
    </property>

    <!-- Enable automatic NameNode failover for the HA HDFS cluster -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.safemode.threshold.pct</name>
        <value>1</value>
        <description>
            Specifies the percentage of blocks that should satisfy
            the minimal replication requirement defined by dfs.replication.min.
            Values less than or equal to 0 mean not to wait for any particular
            percentage of blocks before exiting safemode.
            Values greater than 1 will make safe mode permanent.
        </description>
    </property>
</configuration>
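
Once these files are in place, Hadoop itself can confirm how it parsed the HA settings; a quick sanity check (the outputs shown are what the configuration above should yield):

hdfs getconf -confKey dfs.nameservices   # should print puegg
hdfs getconf -namenodes                  # should list spark01 spark02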

workers

spark01
spark02
spark03

mapred-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
    </property>
</configuration>

yarn-site.xml

<?xml version="1.0"?>
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
  <name>yarn.resourcemanager.ha.enabled</name>
  <value>true</value>
</property>
<property>
  <name>yarn.resourcemanager.cluster-id</name>
  <value>cluster1</value>
</property>
<property>
  <name>yarn.resourcemanager.ha.rm-ids</name>
  <value>rm1,rm2</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm1</name>
  <value>spark01</value>
</property>
<property>
  <name>yarn.resourcemanager.hostname.rm2</name>
  <value>spark02</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address.rm1</name>
  <value>spark01:8088</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address.rm2</name>
  <value>spark02:8088</value>
</property>
<property>
  <name>yarn.resourcemanager.zk-address</name>
  <value>spark01:2181,spark02:2181,spark03:2181</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>

<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
</configuration>
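
Once YARN is running (see the startup steps further below), the ResourceManager HA state can be checked against the rm-ids defined above; a small sketch:

yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2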

Configure passwordless SSH login

Create a local key pair and append the public key to the authorized keys file

ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# or
ssh-copy-id spark01
ssh-copy-id spark02
ssh-copy-id spark03
scp -rv  ~/.ssh root@spark02:~/
scp -rv  ~/.ssh root@spark03:~/
# ssh to yourself once to accept the host key
ssh spark01
# type yes at the "Are you sure you want to continue connecting (yes/no)?" prompt
# after a successful login, run exit or logout to return
exit
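
A quick loop to verify passwordless login to all three nodes (it should print each hostname without asking for a password):

for h in spark01 spark02 spark03; do ssh $h hostname; done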

Copy the configuration files to the other servers, or distribute them with a script (see the sketch after the commands below)

scp -v /etc/profile root@spark02:/etc
scp -v /etc/profile root@spark03:/etc
scp -rv /opt/soft/hadoop3/etc/hadoop/* root@spark02:/opt/soft/hadoop3/etc/hadoop/
scp -rv /opt/soft/hadoop3/etc/hadoop/* root@spark03:/opt/soft/hadoop3/etc/hadoop/
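
The same distribution can be scripted; a minimal sketch using rsync (installed earlier), assuming identical directory layouts and passwordless root SSH on every node:

#!/bin/bash
# sync the profile and the Hadoop configuration to the other nodes
for h in spark02 spark03; do
  rsync -av /etc/profile root@$h:/etc/profile
  rsync -av /opt/soft/hadoop3/etc/hadoop/ root@$h:/opt/soft/hadoop3/etc/hadoop/
done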

Apply the environment variables on each server

source /etc/profile

Initialize Hadoop

# create the data directory (on every node)
mkdir -p /home/hadoop_data
1. Start ZooKeeper on all three nodes: zkServer.sh start
2. Start the JournalNode on all three nodes: hadoop-daemon.sh start journalnode
3. Format one of the NameNodes: hdfs namenode -format
4. Copy the freshly formatted metadata to the other NameNode:
    a) Start the NameNode that was just formatted: hadoop-daemon.sh start namenode
    b) On the unformatted NameNode, run: hdfs namenode -bootstrapStandby
    c) Start the second NameNode: hadoop-daemon.sh start namenode
5. On one of the NameNodes, initialize the failover state in ZooKeeper: hdfs zkfc -formatZK
6. Stop the services started above: stop-dfs.sh
7. Start everything: start-dfs.sh
8. Start the ResourceManager nodes: yarn --daemon start resourcemanager (or the older yarn-daemon.sh start resourcemanager), then verify as shown below.
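
After start-dfs.sh and the ResourceManagers are up, a quick per-node process check (the exact daemon list on each host depends on the roles assigned above):

for h in spark01 spark02 spark03; do echo "== $h =="; ssh $h jps; done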



9. Safe mode (enter or leave manually):

hdfs dfsadmin -safemode enter  
hdfs dfsadmin -safemode leave
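
The current safe-mode status can also be queried without changing it:

hdfs dfsadmin -safemode get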


10. List the NameNodes and check their service state (nn1 and nn2 are the NameNode IDs defined in hdfs-site.xml):
hdfs getconf -namenodes
hdfs haadmin -getServiceState nn1

11. Force a state transition:
hdfs haadmin -transitionToActive --forcemanual nn1
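
To see the state of both NameNodes at once (available in Hadoop 3.x):

hdfs haadmin -getAllServiceState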

Important notes:

# before shutting down, stop the services in this order
stop-yarn.sh
stop-dfs.sh
# after booting, start the services in this order
start-dfs.sh
start-yarn.sh

Or

# before shutting down, stop the services
stop-all.sh
# after booting, start the services
start-all.sh
# use jps to confirm the processes are healthy after each start or stop before doing anything else
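
Once everything is up, a small MapReduce job makes a convenient end-to-end smoke test; a sketch assuming the examples jar bundled with Hadoop 3.3.5:

hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.5.jar pi 2 10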

