網(wǎng)站首頁(yè) 編程語(yǔ)言 正文
環(huán)境+版本
k8s: v1.21.1
hadoop: 3.2.2
dockerfile
# syntax=docker/dockerfile:1
# Hadoop 3.2.2 node image (namenode or datanode selected at runtime via
# HADOOP_NODE_TYPE in docker-entrypoint.sh).
FROM openjdk:8-jdk

# Public key for ssh access into the container (optional).
# NOTE(review): a build ARG is visible in `docker history`; baking a personal
# key into the image ships it to every consumer. Prefer mounting
# authorized_keys at deploy time (e.g. a Kubernetes Secret volume).
ARG SSH_PUB='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3nTRJ/aVb67l1xMaN36jmIbabU7Hiv/xpZ8bwLVvNO3Bj7kUzYTp7DIbPcHQg4d6EsPC6j91E8zW6CrV2fo2Ai8tDO/rCq9Se/64F3+8oEIiI6E/OfUZfXD1mPbG7M/kcA3VeQP6wxNPhWBbKRisqgUc6VTKhl+hK6LwRTZgeShxSNcey+HZst52wJxjQkNG+7CAEY5bbmBzAlHCSl4Z0RftYTHR3q8LcEg7YLNZasUogX68kBgRrb+jw1pRMNo7o7RI9xliDAGX+E4C3vVZL0IsccKgr90222axsADoEjC9O+Q6uwKjahemOVaau+9sHIwkelcOcCzW5SuAwkezv 805899926@qq.com'

# sshd plus debugging tools. `update` and `install` are combined in one
# layer so a cached stale package index can never be used, and the apt
# lists are removed in the same layer to keep the image small.
RUN apt-get update \
 && apt-get install -y --no-install-recommends \
      git \
      net-tools \
      openssh-server \
      vim \
 && rm -rf /var/lib/apt/lists/*

# Comment out any pre-existing values for the options we manage, then
# append our own sshd settings at the end of the file.
RUN sed -i -r 's/^\s*UseDNS\s+\w+/#\0/; s/^\s*PasswordAuthentication\s+\w+/#\0/; s/^\s*ClientAliveInterval\s+\w+/#\0/' /etc/ssh/sshd_config \
 && printf 'UseDNS no\nPermitRootLogin yes\nPasswordAuthentication yes\nClientAliveInterval 30\n' >> /etc/ssh/sshd_config

# Authorize the build-arg public key for root, and generate a passphrase-less
# keypair (-N '') so the build never blocks on an interactive prompt and
# nodes can ssh to each other without a password.
RUN mkdir -p /root/.ssh \
 && chmod 700 /root/.ssh \
 && echo "${SSH_PUB}" > /root/.ssh/authorized_keys \
 && chmod 644 /root/.ssh/authorized_keys \
 && ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa \
 && cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys

# Hadoop distribution. HADOOP_TGZ_URL is build-time-only in spirit but kept
# as ENV to preserve the original image contract.
ENV HADOOP_TGZ_URL=https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz \
    HADOOP_HOME=/opt/hadoop
ENV PATH=$HADOOP_HOME/bin:$PATH

# Download, unpack, create the HDFS name/data dirs and delete the tarball
# and bundled docs — all in ONE layer, so neither the tarball nor the docs
# ever persist in the image. (The original had a stray trailing `\` here
# that swallowed the next RUN and broke the build.)
RUN set -ex; \
    mkdir -p $HADOOP_HOME; \
    wget -nv -O $HADOOP_HOME/src.tgz $HADOOP_TGZ_URL; \
    tar -xf $HADOOP_HOME/src.tgz --strip-components=1 -C $HADOOP_HOME; \
    rm $HADOOP_HOME/src.tgz; \
    chown -R root:root $HADOOP_HOME; \
    mkdir -p $HADOOP_HOME/hdfs/name $HADOOP_HOME/hdfs/data; \
    rm -rf $HADOOP_HOME/share/doc

COPY docker-entrypoint.sh /

# 22 = sshd, 9870 = HDFS namenode web UI, 9000 = HDFS rpc (fs.defaultFS)
EXPOSE 22 9870 9000

ENTRYPOINT ["/docker-entrypoint.sh"]
docker-entrypoint.sh
#!/bin/bash
# Container entrypoint: start sshd, then run the HDFS daemon selected by
# the HADOOP_NODE_TYPE environment variable ("namenode" or "datanode").
set -e

service ssh start

hdfs_dir=$HADOOP_HOME/hdfs/

if [ "$HADOOP_NODE_TYPE" = "datanode" ]; then
    echo -e "\033[32m start datanode \033[0m"
    # exec so the hdfs process becomes PID 1 and receives SIGTERM on stop.
    exec "$HADOOP_HOME"/bin/hdfs datanode -regular
fi

if [ "$HADOOP_NODE_TYPE" = "namenode" ]; then
    # Format HDFS only on first start (empty data dir); re-formatting on a
    # pod restart would wipe the existing filesystem.
    if [ -z "$(ls -A "$hdfs_dir")" ]; then
        echo -e "\033[32m start hdfs namenode format \033[0m"
        "$HADOOP_HOME"/bin/hdfs namenode -format
    fi
    echo -e "\033[32m start hdfs namenode \033[0m"
    exec "$HADOOP_HOME"/bin/hdfs namenode
fi
# Original was missing the closing `fi` above — the script was a syntax
# error and the container could never start.
pod template
# Hadoop on Kubernetes: shared config, namenode Service/Deployment/PVC,
# datanode StatefulSet. Reconstructed from a scrape that had fused the
# ConfigMap keys and stripped all XML markup from the *-site.xml data.
apiVersion: v1
kind: ConfigMap
metadata:
  name: hadoop
  namespace: big-data
  labels:
    app: hadoop
data:
  hadoop-env.sh: |
    export HDFS_DATANODE_USER=root
    export HDFS_NAMENODE_USER=root
    export HDFS_SECONDARYNAMENODE_USER=root
    export JAVA_HOME=/usr/local/openjdk-8
    export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
    export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib/native"
  core-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <!-- Datanodes and clients reach the namenode through this Service. -->
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop-master:9000</value>
      </property>
    </configuration>
  hdfs-site.xml: |
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <!-- Bind rpc on all interfaces so the Service can route to the pod. -->
      <property>
        <name>dfs.namenode.rpc-bind-host</name>
        <value>0.0.0.0</value>
      </property>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///opt/hadoop/hdfs/name</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///opt/hadoop/hdfs/data</value>
      </property>
      <!-- Pod IPs have no reverse DNS; skip the hostname check. -->
      <property>
        <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
        <value>false</value>
      </property>
      <property>
        <name>dfs.replication</name>
        <value>1</value>
      </property>
    </configuration>
---
# namenode svc
apiVersion: v1
kind: Service
metadata:
  name: hadoop-master
  namespace: big-data
spec:
  selector:
    app: hadoop-namenode
  type: NodePort
  ports:
    - name: rpc
      port: 9000
      targetPort: 9000
    - name: http
      port: 9870
      targetPort: 9870
      # Original used nodePort 9870, which is outside the default NodePort
      # range (30000-32767) and is rejected by the API server.
      nodePort: 31870
---
# namenode pod
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hadoop-namenode
  namespace: big-data
spec:
  # Recreate: the namenode PVC must never be mounted by two pods at once.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: hadoop-namenode
  template:
    metadata:
      labels:
        app: hadoop-namenode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
        - name: hadoop-data
          persistentVolumeClaim:
            claimName: data-hadoop-namenode
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            # Config files are overlaid one-by-one via subPath so the rest
            # of /opt/hadoop/etc/hadoop from the image stays intact.
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            - name: hadoop-data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: hadoop-data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: namenode
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-hadoop-namenode
  namespace: big-data
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Gi
  storageClassName: "managed-nfs-storage"
---
# datanode pod
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: hadoop-datanode
  namespace: big-data
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hadoop-datanode
  serviceName: hadoop-datanode
  template:
    metadata:
      labels:
        app: hadoop-datanode
    spec:
      volumes:
        - name: hadoop-env
          configMap:
            name: hadoop
            items:
              - key: hadoop-env.sh
                path: hadoop-env.sh
        - name: core-site
          configMap:
            name: hadoop
            items:
              - key: core-site.xml
                path: core-site.xml
        - name: hdfs-site
          configMap:
            name: hadoop
            items:
              - key: hdfs-site.xml
                path: hdfs-site.xml
      containers:
        - name: hadoop
          image: registry:5000/hadoop
          imagePullPolicy: Always
          ports:
            - containerPort: 22
            - containerPort: 9000
            - containerPort: 9870
          volumeMounts:
            - name: hadoop-env
              mountPath: /opt/hadoop/etc/hadoop/hadoop-env.sh
              subPath: hadoop-env.sh
            - name: core-site
              mountPath: /opt/hadoop/etc/hadoop/core-site.xml
              subPath: core-site.xml
            - name: hdfs-site
              mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
              subPath: hdfs-site.xml
            - name: data
              mountPath: /opt/hadoop/hdfs/
              subPath: hdfs
            - name: data
              mountPath: /opt/hadoop/logs/
              subPath: logs
          env:
            - name: HADOOP_NODE_TYPE
              value: datanode
  # Per-replica storage: each datanode gets its own PVC named data-<pod>.
  volumeClaimTemplates:
    - metadata:
        name: data
        namespace: big-data
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: 256Gi
        storageClassName: "managed-nfs-storage"
原文鏈接:https://www.cnblogs.com/chenzhaoyu/p/15141679.html
相關(guān)推薦
- 2022-05-23 Python學(xué)習(xí)之sys模塊使用教程詳解_python
- 2022-06-22 C#使用Dictionary&lt;string, string&gt;拆分字符串與記錄log方法_
- 2022-11-12 C++中的數(shù)組、鏈表與哈希表_C 語(yǔ)言
- 2022-04-11 Python - logging.Formatter 的常用格式字符串
- 2022-05-11 Redis之RedisTemplate配置方式(序列和反序列化)_Redis
- 2022-06-24 Python抽象類應(yīng)用詳情_(kāi)python
- 2022-04-23 python中的迭代器,生成器與裝飾器詳解_python
- 2022-01-26 使用Guzzle拓展包請(qǐng)求接口失敗重試
- 最近更新
-
- window11 系統(tǒng)安裝 yarn
- 超詳細(xì)win安裝深度學(xué)習(xí)環(huán)境2025年最新版(
- Linux 中運(yùn)行的top命令 怎么退出?
- MySQL 中decimal 的用法? 存儲(chǔ)小
- get 、set 、toString 方法的使
- @Resource和 @Autowired注解
- Java基礎(chǔ)操作-- 運(yùn)算符,流程控制 Flo
- 1. Int 和Integer 的區(qū)別,Jav
- spring @retryable不生效的一種
- Spring Security之認(rèn)證信息的處理
- Spring Security之認(rèn)證過(guò)濾器
- Spring Security概述快速入門
- Spring Security之配置體系
- 【SpringBoot】SpringCache
- Spring Security之基于方法配置權(quán)
- redisson分布式鎖中waittime的設(shè)
- maven:解決release錯(cuò)誤:Artif
- restTemplate使用總結(jié)
- Spring Security之安全異常處理
- MybatisPlus優(yōu)雅實(shí)現(xiàn)加密?
- Spring ioc容器與Bean的生命周期。
- 【探索SpringCloud】服務(wù)發(fā)現(xiàn)-Nac
- Spring Security之基于HttpR
- Redis 底層數(shù)據(jù)結(jié)構(gòu)-簡(jiǎn)單動(dòng)態(tài)字符串(SD
- arthas操作spring被代理目標(biāo)對(duì)象命令
- Spring中的單例模式應(yīng)用詳解
- 聊聊消息隊(duì)列,發(fā)送消息的4種方式
- bootspring第三方資源配置管理
- GIT同步修改后的遠(yuǎn)程分支