教你在k8s上部署HADOOP-3.2.2(HDFS)的方法
时间:2025-12-21 05:30:02|栏目:其它服务器
环境+版本
k8s: v1.21.1
hadoop: 3.2.2
dockerfile
FROM openjdk:8-jdk
# Optional: add your own public key so you can ssh into the container.
ARG SSH_PUB='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3nTRJ/aVb67l1xMaN36jmIbabU7Hiv/xpZ8bwLVvNO3Bj7kUzYTp7DIbPcHQg4d6EsPC6j91E8zW6CrV2fo2Ai8tDO/rCq9Se/64F3+8oEIiI6E/OfUZfXD1mPbG7M/kcA3VeQP6wxNPhWBbKRisqgUc6VTKhl+hK6LwRTZgeShxSNcey+HZst52wJxjQkNG+7CAEY5bbmBzAlHCSl4Z0RftYTHR3q8LcEg7YLNZasUogX68kBgRrb+jw1pRMNo7o7RI9xliDAGX+E4C3vVZL0IsccKgr90222axsADoEjC9O+Q6uwKjahemOVaau+9sHIwkelcOcCzW5SuAwkezv 805899926@qq.com'
# Single layer for apt work; clean the package lists to keep the image small.
RUN apt-get update && \
    apt-get install -y openssh-server net-tools vim git && \
    rm -rf /var/lib/apt/lists/*
# Comment out any existing directives we are about to set, then append ours.
# printf is used instead of echo so the \n escapes are portable across shells.
RUN sed -i -r 's/^\s*UseDNS\s+\w+/#\0/; s/^\s*PasswordAuthentication\s+\w+/#\0/; s/^\s*ClientAliveInterval\s+\w+/#\0/' /etc/ssh/sshd_config && \
    printf 'UseDNS no\nPermitRootLogin yes\nPasswordAuthentication yes\nClientAliveInterval 30\n' >> /etc/ssh/sshd_config && \
    cat /etc/ssh/sshd_config
# Install the build-arg public key and generate a host keypair for root.
# -N '' is required: without it ssh-keygen prompts for a passphrase and the
# non-interactive build hangs.
RUN mkdir -p /root/.ssh && chmod 700 /root/.ssh && \
    echo "${SSH_PUB}" > /root/.ssh/authorized_keys && \
    chmod 644 /root/.ssh/authorized_keys && \
    ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa && \
    cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
# hadoop
ENV HADOOP_TGZ_URL=https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz
ENV HADOOP_HOME=/opt/hadoop
ENV PATH=$HADOOP_HOME/bin:$PATH
# NOTE: the original ended this RUN with a trailing backslash, which made the
# next "RUN mkdir ..." line part of the same shell command and broke the build.
RUN set -ex; \
    mkdir -p $HADOOP_HOME; \
    wget -nv -O $HADOOP_HOME/src.tgz $HADOOP_TGZ_URL; \
    tar -xf $HADOOP_HOME/src.tgz --strip-components=1 -C $HADOOP_HOME; \
    rm $HADOOP_HOME/src.tgz; \
    chown -R root:root $HADOOP_HOME
RUN mkdir -p $HADOOP_HOME/hdfs/name/ $HADOOP_HOME/hdfs/data/
# clean trash file or dir
RUN rm -rf $HADOOP_HOME/share/doc/
COPY docker-entrypoint.sh /
# Ensure the entrypoint is executable regardless of the host file mode.
RUN chmod +x /docker-entrypoint.sh
EXPOSE 22 9870 9000
ENTRYPOINT ["/docker-entrypoint.sh"]
docker-entrypoint.sh
#!/bin/bash
# Entrypoint: start sshd, then run the HDFS daemon selected by
# $HADOOP_NODE_TYPE ("datanode" or "namenode").
set -e
service ssh start
hdfs_dir=$HADOOP_HOME/hdfs/
# Quote the variable: an unset HADOOP_NODE_TYPE would otherwise make
# [ = "datanode" ] a test syntax error.
if [ "$HADOOP_NODE_TYPE" = "datanode" ]; then
    echo -e "\033[32m start datanode \033[0m"
    $HADOOP_HOME/bin/hdfs datanode -regular
fi
if [ "$HADOOP_NODE_TYPE" = "namenode" ]; then
    # Format only on first boot, i.e. when the HDFS data directory is empty.
    if [ -z "$(ls -A "${hdfs_dir}")" ]; then
        echo -e "\033[32m start hdfs namenode format \033[0m"
        $HADOOP_HOME/bin/hdfs namenode -format
    fi
    echo -e "\033[32m start hdfs namenode \033[0m"
    $HADOOP_HOME/bin/hdfs namenode
fi
pod template
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: hadoop
  namespace: big-data
  labels:
    app: hadoop
data:
  hadoop-env.sh: |
    export HDFS_DATANODE_USER=root
    export HDFS_NAMENODE_USER=root
    export HDFS_SECONDARYNAMENODE_USER=root
    export JAVA_HOME=/usr/local/openjdk-8
    export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
    export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib/native"
  # Hadoop site files use the standard <configuration>/<property> XML layout;
  # the property names/values below match the original article's intent.
  core-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop-master:9000</value>
      </property>
      <property>
        <name>dfs.namenode.rpc-bind-host</name>
        <value>0.0.0.0</value>
      </property>
    </configuration>
  hdfs-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///opt/hadoop/hdfs/name</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///opt/hadoop/hdfs/data</value>
      </property>
      <property>
        <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
        <value>false</value>
      </property>
      <property>
        <name>dfs.replication</name>
        <value>1</value>
      </property>
    </configuration>
---
# namenode svc
apiVersion: v1
kind: Service
metadata:
  name: hadoop-master
  namespace: big-data
spec:
  selector:
    app: hadoop-namenode
  type: NodePort
  ports:
    - name: rpc
      port: 9000
      targetPort: 9000
    - name: http
      port: 9870
      targetPort: 9870
      # nodePort must fall inside the cluster's service-node-port-range
      # (default 30000-32767); the original value 9870 is rejected by the
      # API server.
      nodePort: 30870
# namenode pod
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hadoop-namenode
  namespace: big-data
spec:
  # Recreate: the namenode holds an exclusive lock on its metadata dir, so
  # the old pod must stop before a new one starts.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: hadoop-namenode
  template:
    metadata:
      labels:
        app: hadoop-namenode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
        - name: hadoop-data
          persistentVolumeClaim:
            claimName: data-hadoop-namenode
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            # subPath mounts overlay single files so the rest of the
            # image's etc/hadoop directory stays intact.
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            - name: hadoop-data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: hadoop-data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: namenode
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-hadoop-namenode
  namespace: big-data
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Gi
  storageClassName: "managed-nfs-storage"
# datanode pod
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: hadoop-datanode
  namespace: big-data
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hadoop-datanode
  serviceName: hadoop-datanode
  template:
    metadata:
      labels:
        app: hadoop-datanode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            # "data" comes from volumeClaimTemplates below: each replica
            # gets its own PVC (data-hadoop-datanode-0, -1, ...).
            - name: data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: datanode
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: 256Gi
        storageClassName: "managed-nfs-storage"
您可能感兴趣的文章
- 12-20Kubernetes中使用临时容器进行故障排查的方法
- 12-20Nginx设置HTTPS的方法步骤
- 12-20二进制方式安装 Kubernetes1.18.3版本实现脚本
- 12-20Nginx工作模式及代理配置的使用细节
- 12-20ZooKeeper分布式协调服务设计核心概念及安装配置
- 12-20Kubernetes部署可视化地图的十个步骤
- 12-20关于docker清理Overlay2占用磁盘空间的问题(亲测有效)
- 12-20Docker compose配置文件写法及命令使用示例
- 12-20openwrt安装docker并启动的操作方法
- 12-20云原生Kubernetes初始化容器Init使用教程


阅读排行
推荐教程
- 12-07一文教你怎么选择Tomcat对应的JDK版本
- 12-07新版Eclipse集成Tomcat时找不到server选项的解决方法
- 12-06IIS7 应用程序池自动回收关闭的解决方案
- 12-05Windows Server 2019安装VMware
- 12-05Windows服务器默认IE浏览器无法下载文件的解决方法
- 12-05Docker安装Jenkins全过程
- 12-19Zabbix SAML SSO 登录绕过漏洞的操作流程
- 12-15Docker-Compose搭建Spark集群的实现方法
- 12-14Docker Desktop无法正常启动解决(failed to start...)
- 12-14k8s 与docker空间使用分析与清理方法





