Installing Hadoop in an ARM Kylin (麒麟) Environment

Original · SQL on Hadoop · Author: joshliu · 2021-03-03 16:12:25

1. Download the installation package and upload it to the server

2. Create the installation directory and extract the package

[root@node2 ~]# mkdir /usr/local/hadoop
[root@node2 ~]# tar -zxf hadoop-3.1.1-arm64.tar.gz -C /usr/local/hadoop
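
Hadoop needs a JDK on every node, and on an ARM Kylin host it must be an aarch64 build; a quick sanity check (assuming the JDK is already installed and on the PATH):

[root@node2 ~]# uname -m        ## expect aarch64
[root@node2 ~]# java -version   ## any aarch64 JDK 8 build will do for Hadoop 3.1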

3. Edit the configuration files

[root@node2 ~]# cd /usr/local/hadoop/hadoop-3.1.1/etc/hadoop/
[root@node2 hadoop]# head hadoop-env.sh
.  /etc/profile  ## add this line
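
Note that Hadoop 3.x refuses to launch the HDFS/YARN daemons as root unless the run-as users are declared. Since this cluster is operated as root, the following exports (my addition, not shown in the original hadoop-env.sh) are typically also required:

export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root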
[root@node2 hadoop]# cat core-site.xml
<configuration>
    <!-- RPC address of the NameNode (the HDFS master) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://node2:9000</value>
    </property>
    <!-- Storage path for files Hadoop generates at runtime -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/usr/local/hadoop/hadoop-3.1.1/data/tmp</value>
    </property>
</configuration>
[root@node2 hadoop]# cat hdfs-site.xml
<configuration>
    <!-- HTTP address of the NameNode -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>node2:50070</value>
    </property>
    <!-- HTTP address of the SecondaryNameNode -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>node3:50090</value>
    </property>
    <!-- Storage path for NameNode metadata -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/usr/local/hadoop/hadoop-3.1.1/data/name</value>
    </property>
    <!-- HDFS replication factor -->
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <!-- Storage path for DataNode blocks -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/usr/local/hadoop/hadoop-3.1.1/data/datanode</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>
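
Two side notes on hdfs-site.xml: dfs.permissions is the legacy key name, and in Hadoop 2/3 the supported spelling is dfs.permissions.enabled; also, 50070 was the Hadoop 2 default NameNode web port (Hadoop 3 moved the default to 9870), so pinning dfs.namenode.http-address as above keeps the familiar port.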
[root@node2 hadoop]# cat mapred-site.xml
<configuration>
    <!-- Tell the MapReduce framework to use YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>
        /usr/local/hadoop/hadoop-3.1.1/etc/hadoop,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/common/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/common/lib/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/hdfs/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/hdfs/lib/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/mapreduce/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/mapreduce/lib/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/yarn/*,
        /usr/local/hadoop/hadoop-3.1.1/share/hadoop/yarn/lib/*
        </value>
    </property>
</configuration>
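
Once the environment variables from step 5 are in place, hadoop classpath prints the classpath the launcher scripts compute, which is a quick way to confirm that the share/hadoop/* directories listed above actually exist under this install:

[root@node2 hadoop]# hadoop classpath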
[root@node2 hadoop]# cat yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>node3</value>
    </property>
    <property>
        <description>The http address of the RM web application.</description>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>${yarn.resourcemanager.hostname}:8088</value>
    </property>
    <property>
        <description>The address of the applications manager interface in the RM.</description>
        <name>yarn.resourcemanager.address</name>
        <value>${yarn.resourcemanager.hostname}:8032</value>
    </property>
    <property>
        <description>The address of the scheduler interface.</description>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>${yarn.resourcemanager.hostname}:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>${yarn.resourcemanager.hostname}:8031</value>
    </property>
    <property>
        <description>The address of the RM admin interface.</description>
        <name>yarn.resourcemanager.admin.address</name>
        <value>${yarn.resourcemanager.hostname}:8033</value>
    </property>
</configuration>
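
One property this yarn-site.xml leaves out is the shuffle auxiliary service, which MapReduce jobs need on every NodeManager; if jobs later fail at the shuffle stage, add the standard setting below (my addition, not in the original config):

    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>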
[root@node2 hadoop]# cat masters 
node3
[root@node2 hadoop]# cat slaves 
node2
node4
[root@node2 hadoop]# cat workers 
node2
node3
node4
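
Note: Hadoop 3.x reads the worker list from the workers file; masters and slaves are Hadoop 2.x-era names and are kept here only for reference (the SecondaryNameNode placement is actually controlled by dfs.namenode.secondary.http-address in hdfs-site.xml).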

4. Copy to the other nodes

[root@node2 hadoop]# cd /usr/local
[root@node2 local]# scp -r hadoop node3:$PWD   ## same path on every node, so HADOOP_HOME matches
[root@node2 local]# scp -r hadoop node4:$PWD

5. Add environment variables

[root@node2 ~]# cat /etc/profile  ## add the following on all three nodes, then make it take effect
#hadoop
export HADOOP_HOME=/usr/local/hadoop/hadoop-3.1.1
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
[root@node2 ~]# source /etc/profile
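
start-all.sh drives the remote daemons over SSH, so node2 needs passwordless root SSH to every node (including itself). If that is not set up yet, a minimal sketch:

[root@node2 ~]# ssh-keygen -t rsa     ## accept the defaults
[root@node2 ~]# ssh-copy-id node2
[root@node2 ~]# ssh-copy-id node3
[root@node2 ~]# ssh-copy-id node4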

6. Start the Hadoop environment
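
Before the very first start, format the NameNode metadata directory (a one-time operation; re-running it on a live cluster destroys the HDFS metadata):

[root@node2 ~]# hdfs namenode -format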

[root@node2 ~]# start-all.sh

7. Check the processes and access the HDFS file system

[root@node2 ~]# jps
253255 Jps
236692 DataNode
236276 NameNode
[root@node2 ~]# hdfs dfs -mkdir /test
[root@node2 ~]# hdfs dfs -ls /
Found 1 items
drwxr-xr-x   - root supergroup          0 2021-03-03 15:03 /test
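
With the configuration above, the NameNode web UI should be reachable at http://node2:50070 and the YARN ResourceManager UI at http://node3:8088.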

