0. CDH 6.3.2 Spark Upgrade Reference

# Software versions: jdk-1.8, maven-3.8.6, scala-2.12.15, spark-3.3.4
# Note: do not change the Maven or Scala minor versions; if you do, update the corresponding version numbers in the pom, otherwise the build fails with version errors
wget  http://distfiles.macports.org/scala2.12/scala-2.12.15.tgz
wget  https://archive.apache.org/dist/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.tar.gz
wget  https://archive.apache.org/dist/spark/spark-3.3.4/spark-3.3.4.tgz

tar -zxvf scala-2.12.15.tgz -C /opt/
tar -zxvf apache-maven-3.8.6-bin.tar.gz -C /opt/
tar -zxvf spark-3.3.4.tgz -C /opt/
tar -zxvf scala-2.12.15.tgz -C /opt/spark-3.3.4/build/
tar -zxvf apache-maven-3.8.6-bin.tar.gz -C /opt/spark-3.3.4/build/

# Put the tarballs under /opt, extract them all, and set the JDK, Scala and Maven environment variables
# vim /etc/profile

export JAVA_HOME=/usr/java/jdk1.8.0_361-amd64
export HADOOP_CONF_DIR=/etc/hadoop/conf
export HADOOP_HOME=/opt/cloudera/parcels/CDH/lib/hadoop
export HADOOP_CLASSPATH=`hadoop classpath`
export MAVEN_HOME=/opt/apache-maven-3.8.6
export SCALA_HOME=/opt/scala-2.12.15
export PATH=$JAVA_HOME/bin:$PATH:$SCALA_HOME/bin:$HADOOP_CONF_DIR:$HADOOP_HOME:$MAVEN_HOME/bin
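# After editing /etc/profile, reload it and do a quick sanity check of the toolchain
# (a sketch; the expected versions follow from the packages installed above):
source /etc/profile
java -version     # should report 1.8.x
mvn -v            # should report Apache Maven 3.8.6
scala -version    # should report 2.12.15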

# Build Spark 3
# Edit the Spark 3 pom at /opt/spark-3.3.4/pom.xml and add the Aliyun and Cloudera Maven repositories.
# Under the repositories tag, add:
        <repository>
            <id>aliyun</id>
            <url>https://maven.aliyun.com/nexus/content/groups/public</url>
            <releases>
                <enabled>true</enabled>
            </releases>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </repository>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos</url>
            <releases>
                <enabled>true</enabled>
            </releases>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </repository>

# Change the Hadoop version in the pom file
    <!-- <hadoop.version>3.3.2</hadoop.version> -->
    <hadoop.version>3.0.0-cdh6.3.2</hadoop.version>
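# As a sketch, the hadoop.version edit can also be applied non-interactively with sed
# (assumes the stock value in the spark-3.3.4 pom is 3.3.2, as shown above):
sed -i 's#<hadoop.version>3.3.2</hadoop.version>#<hadoop.version>3.0.0-cdh6.3.2</hadoop.version>#' /opt/spark-3.3.4/pom.xml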

# Edit make-distribution.sh
vim /opt/spark-3.3.4/dev/make-distribution.sh
### Increase the memory available to the Maven build
export MAVEN_OPTS="-Xmx8g -XX:ReservedCodeCacheSize=2g"
### Point MVN at the Maven installation set up above
MVN="/opt/apache-maven-3.8.6/bin/mvn"

# Set the Scala version
cd /opt/spark-3.3.4
ls -l ./dev/change-scala-version.sh
# Run the script
./dev/change-scala-version.sh 2.12

# Start the build
./dev/make-distribution.sh --name 3.0.0-cdh6.3.2 --tgz  -Pyarn -Phadoop-3.0 -Phive -Phive-thriftserver -Dhadoop.version=3.0.0-cdh6.3.2 -X

# The build uses Spark's make-distribution.sh script, which itself drives Maven under the hood.
# --tgz packages the result as a .tgz archive
# --name sets the suffix of the generated tarball; here it is the Hadoop/CDH version
# -Pyarn enables YARN support
# -Phive and -Phive-thriftserver enable Hive and the Hive Thrift server
# -Dhadoop.version=3.0.0-cdh6.3.2 specifies the Hadoop version

# The tarball produced by a successful build:
ll /opt/spark-3.3.4/spark-3.3.4-bin-3.0.0-cdh6.3.2.tgz
-rw-r--r-- 1 root root 266773599 Nov 20 18:08 /opt/spark-3.3.4/spark-3.3.4-bin-3.0.0-cdh6.3.2.tgz
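# Optional: peek into the tarball to confirm the build output looks sane (a sketch)
tar -tzf /opt/spark-3.3.4/spark-3.3.4-bin-3.0.0-cdh6.3.2.tgz | head -20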

# Deploy the Spark 3 client
# Upload the tarball to the machine where the Spark 3 client is to be deployed
tar -zxvf spark-3.3.4-bin-3.0.0-cdh6.3.2.tgz -C /opt/cloudera/parcels/CDH/lib
cd /opt/cloudera/parcels/CDH/lib
mv spark-3.3.4-bin-3.0.0-cdh6.3.2 spark3

# Copy spark-env.sh from the CDH cluster into /opt/cloudera/parcels/CDH/lib/spark3/conf:
cp /etc/spark/conf/spark-env.sh  /opt/cloudera/parcels/CDH/lib/spark3/conf
chmod +x /opt/cloudera/parcels/CDH/lib/spark3/conf/spark-env.sh

# Edit spark-env.sh: comment out the original SPARK_HOME and HADOOP_CONF_DIR lines and set them as below
vim /opt/cloudera/parcels/CDH/lib/spark3/conf/spark-env.sh
#export SPARK_HOME=/opt/cloudera/parcels/CDH-6.3.2-1.cdh6.3.2.p0.1605554/lib/spark
#HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-$SPARK_CONF_DIR/yarn-conf}
export SPARK_HOME=/opt/cloudera/parcels/CDH/lib/spark3
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop/conf}
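# Quick sanity check (a sketch): the Spark 3 client should start and print its version banner
/opt/cloudera/parcels/CDH/lib/spark3/bin/spark-submit --version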

# Copy hive-site.xml from the gateway node into spark3/conf; it needs no changes:
cp /etc/hive/conf/hive-site.xml /opt/cloudera/parcels/CDH/lib/spark3/conf/

# Create the spark-sql wrapper script
vim /opt/cloudera/parcels/CDH/bin/spark-sql

#!/bin/bash
export HADOOP_CONF_DIR=/etc/hadoop/conf
export YARN_CONF_DIR=/etc/hadoop/conf
SOURCE="${BASH_SOURCE[0]}"
BIN_DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$BIN_DIR/$SOURCE"
  BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
LIB_DIR=$BIN_DIR/../lib
export HADOOP_HOME=$LIB_DIR/hadoop
# Autodetect JAVA_HOME if not defined
. $LIB_DIR/bigtop-utils/bigtop-detect-javahome
exec $LIB_DIR/spark3/bin/spark-submit --class org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver "$@"

# Register the spark-sql shortcut via alternatives
chmod +x /opt/cloudera/parcels/CDH/bin/spark-sql
alternatives --install /usr/bin/spark-sql spark-sql /opt/cloudera/parcels/CDH/bin/spark-sql 1

# Set up conf
cd /opt/cloudera/parcels/CDH/lib/spark3/conf
## Enable logging (activate the log4j2 template)
mv log4j2.properties.template log4j2.properties
## Bring over spark-defaults.conf from the CDH Spark 2 client
cp /opt/cloudera/parcels/CDH/lib/spark/conf/spark-defaults.conf ./

# Edit spark-defaults.conf
vim /opt/cloudera/parcels/CDH/lib/spark3/conf/spark-defaults.conf
# Remove: spark.extraListeners, spark.sql.queryExecutionListeners, spark.yarn.jars
# Add:
spark.yarn.jars=hdfs://ns1/user/spark/spark3/jars/*

hadoop fs -mkdir -p /user/spark/spark3/jars
cd /opt/cloudera/parcels/CDH/lib/spark3/jars
hadoop fs -put *.jar /user/spark/spark3/jars
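# Verify the upload (a sketch); the HDFS directory must match the spark.yarn.jars path configured above
hadoop fs -ls /user/spark/spark3/jars | head
hadoop fs -count /user/spark/spark3/jars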

# Create the spark3-submit wrapper script
vim /opt/cloudera/parcels/CDH/bin/spark3-submit

#!/usr/bin/env bash
export HADOOP_CONF_DIR=/etc/hadoop/conf
export YARN_CONF_DIR=/etc/hadoop/conf
SOURCE="${BASH_SOURCE[0]}"
BIN_DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$BIN_DIR/$SOURCE"
  BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
LIB_DIR=/opt/cloudera/parcels/CDH/lib
export HADOOP_HOME=$LIB_DIR/hadoop
# Autodetect JAVA_HOME if not defined
. $LIB_DIR/bigtop-utils/bigtop-detect-javahome
# disable randomized hash for string in Python 3.3+
export PYTHONHASHSEED=0
exec $LIB_DIR/spark3/bin/spark-class org.apache.spark.deploy.SparkSubmit "$@"

# Register the spark3-submit shortcut via alternatives
chmod +x /opt/cloudera/parcels/CDH/bin/spark3-submit
alternatives --install /usr/bin/spark3-submit spark3-submit /opt/cloudera/parcels/CDH/bin/spark3-submit 1
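# Optional check (a sketch): confirm the alternatives entries resolve to the wrappers created above
alternatives --display spark-sql
alternatives --display spark3-submit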

# Distribute to the other nodes
cd /opt/cloudera/parcels/CDH/lib/
scp -r spark3 kube-39:/opt/cloudera/parcels/CDH/lib/
scp -r spark3 kube-40:/opt/cloudera/parcels/CDH/lib/
scp -r spark3 kube-41:/opt/cloudera/parcels/CDH/lib/
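# Equivalent loop form (a sketch; hostnames kube-39..41 as above, adjust to your nodes).
# If those nodes are also used as clients, the spark-sql / spark3-submit wrappers under
# /opt/cloudera/parcels/CDH/bin need to be copied and registered with alternatives there as well.
for host in kube-39 kube-40 kube-41; do
  scp -r /opt/cloudera/parcels/CDH/lib/spark3 ${host}:/opt/cloudera/parcels/CDH/lib/
done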

# Test spark3-submit
spark3-submit --conf "spark.default.parallelism=100" --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode cluster --driver-memory 8g --executor-memory 4g --executor-cores 4 --num-executors 3 --queue root.default /opt/cloudera/parcels/CDH/lib/spark3/examples/jars/spark-examples*.jar 1000
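# Additional sanity check for the spark-sql wrapper (a sketch; output depends on your Hive metastore):
spark-sql --master yarn -e "show databases;"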

# Notes
# If dynamic resource allocation (Spark Dynamic Allocation) is enabled, jobs will fail with the following error
FetchFailed(BlockManagerId(2, n6, 7337, None), shuffleId=57, mapId=136, reduceId=0, message=
org.apache.spark.shuffle.FetchFailedException
    at org.apache.spark.errors.SparkCoreErrors$.fetchFailedError(SparkCoreErrors.scala:312)
    at org.apache.spark.storage.ShuffleBlockFetcherIterator.throwFetchFailedException(ShuffleBlockFetcherIterator.scala:1169)
    at org.apache.spark.storage.ShuffleBlockFetcherIterator.next(ShuffleBlockFetcherIterator.scala:904)
    at org.apache.spark.storage.ShuffleBlockFetcherIterator.next(ShuffleBlockFetcherIterator.scala:85)
    at org.apache.spark.util.CompletionIterator.next(CompletionIterator.scala:29)
    .......
Caused by: java.lang.RuntimeException: java.lang.IllegalArgumentException: Unknown message type: 9
    at org.apache.spark.network.shuffle.protocol.BlockTransferMessage$Decoder.fromByteBuffer(BlockTransferMessage.java:71)
    at org.apache.spark.network.shuffle.ExternalShuffleBlockHandler.receive(ExternalShuffleBlockHandler.java:81)
    at org.apache.spark.network.server.TransportRequestHandler.processRpcRequest(TransportRequestHandler.java:150)
    ......

# The CDH external shuffle service only speaks the Spark 2.x shuffle protocol, so add the useOldFetchProtocol setting to spark-defaults.conf
spark.shuffle.useOldFetchProtocol=true
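# The same setting can also be passed per job instead of globally (a sketch, reusing the example jar from the test above):
spark3-submit --conf "spark.shuffle.useOldFetchProtocol=true" --class org.apache.spark.examples.SparkPi --master yarn /opt/cloudera/parcels/CDH/lib/spark3/examples/jars/spark-examples*.jar 100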

# With this, the Hadoop cluster has both the CDH-bundled Spark 2.4.0 and the Apache Spark 3.3.4