
How to deploy Hadoop 3.2.2 (HDFS) on k8s

2024-04-02 19:55

Environment and versions
k8s: v1.21.1
hadoop: 3.2.2

dockerfile

FROM openjdk:8-jdk
# To SSH into the container, add your own public key here (optional)
ARG SSH_PUB='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3nTRJ/aVb67l1xMaN36jmIbabU7Hiv/xpZ8bwLVvNO3Bj7kUzYTp7DIbPcHQg4d6EsPC6j91E8zW6CrV2fo2Ai8tDO/rCq9Se/64F3+8oEIiI6E/OfUZfXD1mPbG7M/kcA3VeQP6wxNPhWBbKRisqgUc6VTKhl+hK6LwRTZgeShxSNcey+HZst52wJxjQkNG+7CAEY5bbmBzAlHCSl4Z0RftYTHR3q8LcEg7YLNZasUogX68kBgRrb+jw1pRMNo7o7RI9xliDAGX+E4C3vVZL0IsccKgr90222axsADoEjC9O+Q6uwKjahemOVaau+9sHIwkelcOcCzW5SuAwkezv 805899926@qq.com'
RUN apt-get update && apt-get install -y openssh-server net-tools vim git;
RUN sed -i -r 's/^\s*UseDNS\s+\w+/#\0/; s/^\s*PasswordAuthentication\s+\w+/#\0/; s/^\s*ClientAliveInterval\s+\w+/#\0/' /etc/ssh/sshd_config;
RUN printf 'UseDNS no\nPermitRootLogin yes\nPasswordAuthentication yes\nClientAliveInterval 30\n' >> /etc/ssh/sshd_config;
RUN cat /etc/ssh/sshd_config   # build-time sanity check of the resulting sshd config
RUN su root bash -c 'cd;mkdir .ssh;chmod 700 .ssh;echo ${SSH_PUB} > .ssh/authorized_keys;chmod 644 .ssh/authorized_keys'
# -N "" gives the key an empty passphrase so the build does not block on a prompt
RUN su root bash -c 'cd;ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa; cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys'
# hadoop
ENV HADOOP_TGZ_URL=https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz
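# NOTE: the Tsinghua mirror prunes old releases; if this URL 404s, Apache's
# archive should still host 3.2.2 at
# https://archive.apache.org/dist/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz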
ENV HADOOP_HOME=/opt/hadoop
ENV PATH=$HADOOP_HOME/bin:$PATH
RUN set -ex; \
    mkdir -p $HADOOP_HOME; \
    wget -nv -O $HADOOP_HOME/src.tgz $HADOOP_TGZ_URL; \
    tar -xf $HADOOP_HOME/src.tgz --strip-components=1 -C $HADOOP_HOME; \
    rm $HADOOP_HOME/src.tgz; \
    chown -R root:root $HADOOP_HOME
RUN mkdir -p $HADOOP_HOME/hdfs/name/ && mkdir -p $HADOOP_HOME/hdfs/data/
# clean trash file or dir
RUN rm -rf $HADOOP_HOME/share/doc/;
COPY docker-entrypoint.sh /
EXPOSE 22 9870 9000
ENTRYPOINT ["/docker-entrypoint.sh"]
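
To build the image, the tag must match the image registry:5000/hadoop referenced in the pod template below. A minimal sketch, assuming the Dockerfile and docker-entrypoint.sh sit in the same directory:

chmod +x docker-entrypoint.sh          # ENTRYPOINT needs the execute bit; COPY preserves it
docker build -t registry:5000/hadoop .
docker push registry:5000/hadoop       # every k8s node must be able to resolve and trust registry:5000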

docker-entrypoint.sh

#!/bin/bash
set -e
service ssh start
hdfs_dir=$HADOOP_HOME/hdfs/
if [ "$HADOOP_NODE_TYPE" = "datanode" ]; then
  echo -e "\033[32m start datanode \033[0m"
  $HADOOP_HOME/bin/hdfs datanode -regular
fi
if [ "$HADOOP_NODE_TYPE" = "namenode" ]; then
  # format HDFS only on first start, i.e. while the data directory is still empty
  if [ -z "$(ls -A ${hdfs_dir})" ]; then
    echo -e "\033[32m start hdfs namenode format \033[0m"
    $HADOOP_HOME/bin/hdfs namenode -format
  fi
  echo -e "\033[32m start hdfs namenode \033[0m"
  $HADOOP_HOME/bin/hdfs namenode
fi
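
Before wiring the image into k8s, a quick local smoke test catches entrypoint mistakes early; the container name nn-test is illustrative:

docker run -d --name nn-test -e HADOOP_NODE_TYPE=namenode -p 9870:9870 registry:5000/hadoop
docker logs -f nn-test    # the first run should print "start hdfs namenode format"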

pod template

apiVersion: v1
kind: ConfigMap
metadata:
  name: hadoop
  namespace: big-data
  labels:
    app: hadoop
data:
  hadoop-env.sh: |
    export HDFS_DATANODE_USER=root
    export HDFS_NAMENODE_USER=root
    export HDFS_SECONDARYNAMENODE_USER=root
    export JAVA_HOME=/usr/local/openjdk-8
    export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
    export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib/native"
  core-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://hadoop-master:9000</value>
        </property>
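        <!-- dfs.* keys conventionally live in hdfs-site.xml, but this one is
             honored here too because every daemon loads core-site.xml; binding
             to 0.0.0.0 keeps the NameNode RPC reachable through the Service. -->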
        <property>
            <name>dfs.namenode.rpc-bind-host</name>
            <value>0.0.0.0</value>
        </property>
    </configuration>
  hdfs-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
        <property>
            <name>dfs.namenode.name.dir</name>
            <value>file:///opt/hadoop/hdfs/name</value>
        </property>
        <property>
            <name>dfs.datanode.data.dir</name>
            <value>file:///opt/hadoop/hdfs/data</value>
        </property>
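        <!-- Pod IPs have no reverse-DNS entries, so skip the NameNode's
             hostname check when datanodes register. -->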
        <property>
            <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
            <value>false</value>
        </property>
        <property>
            <name>dfs.replication</name>
            <value>1</value>
        </property>
    </configuration>
---
# namenode svc
apiVersion: v1
kind: Service
metadata:
  name: hadoop-master
  namespace: big-data
spec:
  selector:
    app: hadoop-namenode
  type: NodePort
  ports:
    - name: rpc
      port: 9000
      targetPort: 9000
    - name: http
      port: 9870
      targetPort: 9870
      nodePort: 9870   # outside the default NodePort range (30000-32767); the apiserver's --service-node-port-range must include 9870
# namenode pod
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hadoop-namenode
  namespace: big-data
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: hadoop-namenode
  template:
    metadata:
      labels:
        app: hadoop-namenode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
        - name: hadoop-data
          persistentVolumeClaim:
            claimName: data-hadoop-namenode
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            - name: hadoop-data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: hadoop-data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: namenode
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-hadoop-namenode
  namespace: big-data
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Gi
  storageClassName: "managed-nfs-storage"
# datanode pod
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: hadoop-datanode
  namespace: big-data
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hadoop-datanode
  serviceName: hadoop-datanode
  template:
    metadata:
      labels:
        app: hadoop-datanode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            - name: data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: datanode
  volumeClaimTemplates:
    - metadata:
        name: data
        namespace: big-data
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: 256Gi
        storageClassName: "managed-nfs-storage"
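
With the image pushed and the manifest saved locally (hadoop.yaml is an assumed file name; the managed-nfs-storage StorageClass must already exist in the cluster), the rollout and a basic health check look like this:

kubectl create namespace big-data
kubectl apply -f hadoop.yaml
kubectl -n big-data get pods      # expect one namenode pod and two datanode pods Running
# confirm both datanodes registered with the namenode
kubectl -n big-data exec deploy/hadoop-namenode -- hdfs dfsadmin -report

The NameNode web UI is then reachable at http://<node-ip>:9870 through the NodePort, and in-cluster clients reach HDFS at hdfs://hadoop-master:9000, matching fs.defaultFS.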

This concludes the walkthrough of deploying Hadoop 3.2.2 (HDFS) on k8s. For more on deploying Hadoop on k8s, search 编程网's earlier articles or browse the related articles below. We hope you will continue to support 编程网!
