部署zookeeper
节点 128 129 130
- 部署前提安装jdk
传送门: zookeeper3.4.14
[root@ceshi-128 local]# java -version
java version "1.8.0_221"
Java(TM) SE Runtime Environment (build 1.8.0_221-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.221-b11, mixed mode)
下载并配置
[root@ceshi-128 ~]# tar -xf zookeeper-3.4.14.tar.gz -C /usr/local/
[root@ceshi-128 ~]# cd /usr/local/
[root@ceshi-128 local]# ln -s /usr/local/zookeeper-3.4.14/ /usr/local/zookeeper
[root@ceshi-128 conf]# pwd
/usr/local/zookeeper/conf
[root@ceshi-128 conf]# cp zoo_sample.cfg zoo.cfg
配置参数
[root@ceshi-128 conf]# vi zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data/zookeeper/data
# transaction log directory, kept separate from the snapshot directory
dataLogDir=/data/zookeeper/logs
# the port at which the clients will connect
clientPort=2181
# Ensemble members: server.<myid>=<host>:<peer-port>:<leader-election-port>.
# <myid> must match the number written to <dataDir>/myid on each node.
server.1=zk1.od.com:2888:3888
server.2=zk2.od.com:2888:3888
server.3=zk3.od.com:2888:3888
[root@ceshi-128 conf]# mkdir -p /data/zookeeper/data
[root@ceshi-128 conf]# mkdir -p /data/zookeeper/logs
添加DNS od.com.解析让以上域名解析成功
配置集群myid
[root@ceshi-128 conf]# vi /data/zookeeper/data/myid
1
[root@ceshi-129 conf]# vi /data/zookeeper/data/myid
2
[root@ceshi-130 conf]# vi /data/zookeeper/data/myid
3
[root@ceshi-128 bin]# /usr/local/zookeeper/bin/zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@ceshi-128 bin]# netstat -tnlp | grep 2181
tcp 0 0 0.0.0.0:2181 0.0.0.0:* LISTEN 55304/java
130节点为leader(zookeeper选举产生的主节点,对应下方 Mode: leader)
[root@ceshi-130 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: leader
部署jenkins至k8s集群
镜像传送门:dockerhub
节点 132
[root@ceshi-132 ~]# docker pull jenkins/jenkins:v2.222.4
v2.222.4: Pulling from jenkins/jenkins
9a0b0ce99936: Pull complete
db3b6004c61a: Pull complete
4e96cf3bdc20: Pull complete
e47bd954be8f: Pull complete
b2d9d6b1cd91: Pull complete
fa537a81cda1: Pull complete
Digest: sha256:64576b8bd0a7f5c8ca275f4926224c29e7aa3f3167923644ec1243cd23d611f3
Status: Downloaded newer image for jenkins/jenkins:v2.222.4
docker.io/jenkins/jenkins:v2.222.4
[root@ceshi-132 ~]# docker tag 22b8b9a84dbe harbor.od.com/public/jenkins:v2.222.4
[root@ceshi-132 ~]# docker push harbor.od.com/public/jenkins:v2.222.4
The push refers to repository [harbor.od.com/public/jenkins]
e0485b038afa: Pushed
2950fdd45d03: Pushed
6ce697717948: Pushed
911119b5424d: Pushed
b8f8aeff56a8: Pushed
97041f29baff: Pushed
v2.222.4: digest: sha256:64576b8bd0a7f5c8ca275f4926224c29e7aa3f3167923644ec1243cd23d611f3 size: 4087
配置dockerfile
[root@ceshi-132 ~]# ssh-keygen -t rsa -b 2048 -C "liu_jiangxu@163.com" -N "" -f /root/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:jo0UhlYUk+xszsNIpHt64iUvasvyWSzTaZAE7Xhcfd4 liu_jiangxu@163.com
The key's randomart image is:
+---[RSA 2048]----+
|.. +=o |
| .. oo+.. |
|o.+X. |
|+*=+. |
+----[SHA256]-----+
[root@ceshi-132 ~]# mkdir -p /data/dockerfile/jenkins/
[root@ceshi-132 jenkins]# vi Dockerfile
# Jenkins base image from the private registry.
# NOTE: the original notes had bare annotation lines between instructions,
# which are not valid Dockerfile syntax; they are kept here as comments.
FROM harbor.od.com/public/jenkins:v2.222.4
# Run as root so the steps below (and the docker CLI) can work
USER root
# Copy the Asia/Shanghai timezone into the container
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime &&\
    echo 'Asia/Shanghai' >/etc/timezone
# Add the SSH private key so Jenkins can clone git repos over SSH
ADD id_rsa /root/.ssh/id_rsa
# Add the docker registry login credentials
ADD config.json /root/.docker/config.json
# Add the docker client install script
ADD get-docker.sh /get-docker.sh
# Disable strict SSH host key checking, then install the docker client
RUN echo " StrictHostKeyChecking no" >> /etc/ssh/ssh_config &&\
    /get-docker.sh
拷贝密钥到当前目录
[root@ceshi-132 jenkins]# cp ~/.ssh/id_rsa .
拷贝docker配置到当前目录
[root@ceshi-132 jenkins]# cp ~/.docker/config.json .
[root@ceshi-132 jenkins]# wget https://get.docker.com/ -O get-docker.sh
[root@ceshi-132 jenkins]# chmod +x get-docker.sh
[root@ceshi-132 jenkins]# vi get-docker.sh
#!/bin/sh
set -e
# This script is meant for quick & easy install via:
# $ curl -fsSL get.docker.com -o get-docker.sh
# $ sh get-docker.sh
#
# For test builds (ie. release candidates):
# $ curl -fsSL test.docker.com -o test-docker.sh
# $ sh test-docker.sh
#
# NOTE: Make sure to verify the contents of the script
# you downloaded matches the contents of install.sh
# located at https://github.com/docker/docker-install
# before executing.
#
# Git commit from https://github.com/docker/docker-install when
# the script was uploaded (Should only be modified by upload job):
SCRIPT_COMMIT_SHA=e749601
# This value will automatically get changed for:
# * edge
# * test
# * experimental
DEFAULT_CHANNEL_VALUE="edge"
if [ -z "$CHANNEL" ]; then
  CHANNEL=$DEFAULT_CHANNEL_VALUE
fi
DEFAULT_DOWNLOAD_URL="https://download.docker.com"
if [ -z "$DOWNLOAD_URL" ]; then
  DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
fi
# "<arch>-<distro>-<codename/version>" triples this script can install for;
# checked later in do_install against "uname -m"-$lsb_dist-$dist_version.
SUPPORT_MAP="
x86_64-centos-7
x86_64-fedora-26
x86_64-fedora-27
x86_64-debian-wheezy
x86_64-debian-jessie
x86_64-debian-stretch
x86_64-debian-buster
x86_64-ubuntu-trusty
x86_64-ubuntu-xenial
x86_64-ubuntu-artful
s390x-ubuntu-xenial
s390x-ubuntu-artful
ppc64le-ubuntu-xenial
ppc64le-ubuntu-artful
aarch64-ubuntu-xenial
aarch64-debian-jessie
aarch64-debian-stretch
aarch64-fedora-26
aarch64-fedora-27
aarch64-centos-7
armv6l-raspbian-jessie
armv7l-raspbian-jessie
armv6l-raspbian-stretch
armv7l-raspbian-stretch
armv7l-debian-jessie
armv7l-debian-stretch
armv7l-debian-buster
armv7l-ubuntu-trusty
armv7l-ubuntu-xenial
armv7l-ubuntu-artful
"
mirror=''
DRY_RUN=${DRY_RUN:-}
# Parse command line flags: --mirror <Aliyun|AzureChinaCloud> and --dry-run.
while [ $# -gt 0 ]; do
  case "$1" in
    --mirror)
      mirror="$2"
      shift
      ;;
    --dry-run)
      DRY_RUN=1
      ;;
    --*)
      echo "Illegal option $1"
      ;;
  esac
  # Guarded shift: avoids an error when --mirror was the last argument.
  shift $(( $# > 0 ? 1 : 0 ))
done
# Redirect package downloads to a regional mirror when requested.
case "$mirror" in
  Aliyun)
    DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
    ;;
  AzureChinaCloud)
    DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
    ;;
esac
command_exists() {
  # Return 0 iff the given command name(s) resolve via the shell lookup.
  if command -v "$@" >/dev/null 2>&1; then
    return 0
  else
    return 1
  fi
}
is_dry_run() {
  # True (0) when DRY_RUN holds any non-empty value, false (1) otherwise.
  test -n "$DRY_RUN"
}
deprecation_notice() {
  # Warn that distro $1 loses support on date $2, then pause so the
  # user has a chance to read the message (and Ctrl+C out).
  distro=$1
  date=$2
  printf '\nDEPRECATION WARNING:\n'
  printf ' The distribution, %s, will no longer be supported in this script as of %s.\n' "$distro" "$date"
  printf ' If you feel this is a mistake please submit an issue at https://github.com/docker/docker-install/issues/new\n'
  printf '\n'
  sleep 10
}
get_distribution() {
  # Print the distro ID from /etc/os-release; prints an empty string when
  # the file is missing or unreadable (callers treat "" as "unknown").
  dist=""
  if [ -r /etc/os-release ]; then
    dist="$(. /etc/os-release && echo "$ID")"
  fi
  echo "$dist"
}
add_debian_backport_repo() {
  # Append the Debian backports repo for codename $1 to sources.list,
  # unless an identical line is already present (-F fixed, -x whole line).
  # NOTE(review): relies on the global $sh_c configured in do_install —
  # must not be called before do_install sets it.
  debian_version="$1"
  backports="deb http://ftp.debian.org/debian $debian_version-backports main"
  if ! grep -Fxq "$backports" /etc/apt/sources.list; then
    (set -x; $sh_c "echo \"$backports\" >> /etc/apt/sources.list")
  fi
}
echo_docker_as_nonroot() {
  # Print post-install advice on using docker as a non-root user.
  # Skipped entirely in dry-run mode.
  if is_dry_run; then
    return
  fi
  # Smoke-test the freshly installed daemon; failure is non-fatal (|| true).
  if command_exists docker && [ -e /var/run/docker.sock ]; then
    (
      set -x
      $sh_c 'docker version'
    ) || true
  fi
  # $user is the global set in do_install; fall back to a placeholder
  # when the installer itself ran as root.
  your_user=your-user
  [ "$user" != 'root' ] && your_user="$user"
  echo "If you would like to use Docker as a non-root user, you should now consider"
  echo "adding your user to the \"docker\" group with something like:"
  echo
  echo " sudo usermod -aG docker $your_user"
  echo
  echo "Remember that you will have to log out and back in for this to take effect!"
  echo
  echo "WARNING: Adding a user to the \"docker\" group will grant the ability to run"
  echo " containers which can be used to obtain root privileges on the"
  echo " docker host."
  echo " Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface"
  echo " for more information."
}
# Check if this is a forked Linux distro
check_forked() {
  # Detect forked distros (Mint, OSMC, ...) and rewrite the globals
  # lsb_dist / dist_version to the upstream distro they are based on.
  # Check for lsb_release command existence, it usually exists in forked distros
  if command_exists lsb_release; then
    # Check if the `-u` option is supported (only forked distros have it)
    set +e
    lsb_release -a -u > /dev/null 2>&1
    lsb_release_exit_code=$?
    set -e
    # Check if the command has exited successfully, it means we're in a forked distro
    if [ "$lsb_release_exit_code" = "0" ]; then
      # Print info about current distro
      cat <<-EOF
You're using '$lsb_dist' version '$dist_version'.
EOF
      # Get the upstream release info
      lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
      dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
      # Print info about upstream distro
      cat <<-EOF
Upstream release is '$lsb_dist' version '$dist_version'.
EOF
    else
      if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
        if [ "$lsb_dist" = "osmc" ]; then
          # OSMC runs Raspbian
          lsb_dist=raspbian
        else
          # We're Debian and don't even know it!
          lsb_dist=debian
        fi
        # Map the numeric Debian version to its release codename.
        dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
        case "$dist_version" in
          9)
            dist_version="stretch"
            ;;
          8|'Kali Linux 2')
            dist_version="jessie"
            ;;
          7)
            dist_version="wheezy"
            ;;
        esac
      fi
    fi
  fi
}
semverParse() {
  # Split a version string "MAJOR.MINOR.PATCH[-suffix]" (e.g. "17.12.1-ce")
  # into the globals $major, $minor and $patch; any -suffix is discarded.
  major="${1%%.*}"
  minor="${1#$major.}"
  minor="${minor%%.*}"
  patch="${1#$major.$minor.}"
  patch="${patch%%[-.]*}"
}
ee_notice() {
  # Tell the user that distro $1 is only served by Docker EE.
  printf '\n\n %s\n %s\n\n\n' \
    "WARNING: $1 is now only supported by Docker EE" \
    "Check https://store.docker.com for information on Docker EE"
}
do_install() {
  # Main entry point: detect the platform, add the docker-ce package repo
  # for it, and install docker-ce (optionally pinned via $VERSION).
  echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
  # Warn when a docker binary already exists; upgrades from < 1.10 need a
  # manual image-store migration first.
  if command_exists docker; then
    docker_version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)"
    MAJOR_W=1
    MINOR_W=10
    semverParse "$docker_version"
    shouldWarn=0
    if [ "$major" -lt "$MAJOR_W" ]; then
      shouldWarn=1
    fi
    if [ "$major" -le "$MAJOR_W" ] && [ "$minor" -lt "$MINOR_W" ]; then
      shouldWarn=1
    fi
    cat >&2 <<-'EOF'
Warning: the "docker" command appears to already exist on this system.
If you already have Docker installed, this script can cause trouble, which is
why we're displaying this warning and provide the opportunity to cancel the
installation.
If you installed the current Docker package using this script and are using it
EOF
    if [ $shouldWarn -eq 1 ]; then
      cat >&2 <<-'EOF'
again to update Docker, we urge you to migrate your image store before upgrading
to v1.10+.
You can find instructions for this here:
https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
EOF
    else
      cat >&2 <<-'EOF'
again to update Docker, you can safely ignore this message.
EOF
    fi
    cat >&2 <<-'EOF'
You may press Ctrl+C now to abort this script.
EOF
    # 20-second grace period to abort.
    ( set -x; sleep 20 )
  fi
  # Build a "run as root" command prefix: direct sh, sudo, or su.
  user="$(id -un 2>/dev/null || true)"
  sh_c='sh -c'
  if [ "$user" != 'root' ]; then
    if command_exists sudo; then
      sh_c='sudo -E sh -c'
    elif command_exists su; then
      sh_c='su -c'
    else
      cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
      exit 1
    fi
  fi
  # In dry-run mode privileged commands are only echoed, never executed.
  if is_dry_run; then
    sh_c="echo"
  fi
  # perform some very rudimentary platform detection
  lsb_dist=$( get_distribution )
  lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
  # Resolve the distro codename/version used to select the package repo.
  case "$lsb_dist" in
    ubuntu)
      if command_exists lsb_release; then
        dist_version="$(lsb_release --codename | cut -f2)"
      fi
      if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
        dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
      fi
      ;;
    debian|raspbian)
      dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
      case "$dist_version" in
        9)
          dist_version="stretch"
          ;;
        8)
          dist_version="jessie"
          ;;
        7)
          dist_version="wheezy"
          ;;
      esac
      ;;
    centos)
      if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
        dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
      fi
      ;;
    rhel|ol|sles)
      # Enterprise distros are only served by Docker EE — bail out.
      ee_notice "$lsb_dist"
      exit 1
      ;;
    *)
      if command_exists lsb_release; then
        dist_version="$(lsb_release --release | cut -f2)"
      fi
      if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
        dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
      fi
      ;;
  esac
  # Check if this is a forked Linux distro
  check_forked
  # Check if we actually support this configuration
  if ! echo "$SUPPORT_MAP" | grep "$(uname -m)-$lsb_dist-$dist_version" >/dev/null; then
    cat >&2 <<-'EOF'
Either your platform is not easily detectable or is not supported by this
installer script.
Please visit the following URL for more detailed installation instructions:
https://docs.docker.com/engine/installation/
EOF
    exit 1
  fi
  # Run setup for each distro accordingly
  case "$lsb_dist" in
    ubuntu|debian|raspbian)
      pre_reqs="apt-transport-https ca-certificates curl"
      if [ "$lsb_dist" = "debian" ]; then
        if [ "$dist_version" = "wheezy" ]; then
          add_debian_backport_repo "$dist_version"
        fi
        # libseccomp2 does not exist for debian jessie main repos for aarch64
        if [ "$(uname -m)" = "aarch64" ] && [ "$dist_version" = "jessie" ]; then
          add_debian_backport_repo "$dist_version"
        fi
      fi
      # TODO: August 31, 2018 delete from here,
      if [ "$lsb_dist" = "ubuntu" ] && [ "$dist_version" = "artful" ]; then
        deprecation_notice "$lsb_dist $dist_version" "August 31, 2018"
      fi
      # TODO: August 31, 2018 delete to here,
      if ! command -v gpg > /dev/null; then
        pre_reqs="$pre_reqs gnupg"
      fi
      apt_repo="deb [arch=$(dpkg --print-architecture)] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
      # Import the docker GPG key and register the apt repo.
      (
        if ! is_dry_run; then
          set -x
        fi
        $sh_c 'apt-get update -qq >/dev/null'
        $sh_c "apt-get install -y -qq $pre_reqs >/dev/null"
        $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | apt-key add -qq - >/dev/null"
        $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
        if [ "$lsb_dist" = "debian" ] && [ "$dist_version" = "wheezy" ]; then
          $sh_c 'sed -i "/deb-src.*download\.docker/d" /etc/apt/sources.list.d/docker.list'
        fi
        $sh_c 'apt-get update -qq >/dev/null'
      )
      # Translate $VERSION (e.g. "17.12") into an exact apt package version.
      pkg_version=""
      if [ ! -z "$VERSION" ]; then
        if is_dry_run; then
          echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
        else
          # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
          pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g").*-0~$lsb_dist"
          search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | cut -d' ' -f 4"
          pkg_version="$($sh_c "$search_command")"
          echo "INFO: Searching repository for VERSION '$VERSION'"
          echo "INFO: $search_command"
          if [ -z "$pkg_version" ]; then
            echo
            echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
            echo
            exit 1
          fi
          pkg_version="=$pkg_version"
        fi
      fi
      (
        if ! is_dry_run; then
          set -x
        fi
        $sh_c "apt-get install -y -qq --no-install-recommends docker-ce$pkg_version >/dev/null"
      )
      echo_docker_as_nonroot
      exit 0
      ;;
    centos|fedora)
      yum_repo="$DOWNLOAD_URL/linux/$lsb_dist/docker-ce.repo"
      # Pick dnf on Fedora (>= 26 only), yum on CentOS.
      if [ "$lsb_dist" = "fedora" ]; then
        if [ "$dist_version" -lt "26" ]; then
          echo "Error: Only Fedora >=26 are supported"
          exit 1
        fi
        pkg_manager="dnf"
        config_manager="dnf config-manager"
        enable_channel_flag="--set-enabled"
        pre_reqs="dnf-plugins-core"
        pkg_suffix="fc$dist_version"
      else
        pkg_manager="yum"
        config_manager="yum-config-manager"
        enable_channel_flag="--enable"
        pre_reqs="yum-utils"
        pkg_suffix="el"
      fi
      # Register the docker-ce repo and refresh the package cache.
      (
        if ! is_dry_run; then
          set -x
        fi
        $sh_c "$pkg_manager install -y -q $pre_reqs"
        $sh_c "$config_manager --add-repo $yum_repo"
        if [ "$CHANNEL" != "stable" ]; then
          $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
        fi
        $sh_c "$pkg_manager makecache"
      )
      # Translate $VERSION into an exact yum/dnf package version.
      pkg_version=""
      if [ ! -z "$VERSION" ]; then
        if is_dry_run; then
          echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
        else
          pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
          search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
          pkg_version="$($sh_c "$search_command")"
          echo "INFO: Searching repository for VERSION '$VERSION'"
          echo "INFO: $search_command"
          if [ -z "$pkg_version" ]; then
            echo
            echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
            echo
            exit 1
          fi
          # Cut out the epoch and prefix with a '-'
          pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
        fi
      fi
      (
        if ! is_dry_run; then
          set -x
        fi
        $sh_c "$pkg_manager install -y -q docker-ce$pkg_version"
      )
      echo_docker_as_nonroot
      exit 0
      ;;
  esac
  # Reached only when $lsb_dist matched no supported install path.
  exit 1
}
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
# (no arguments are forwarded; the script reads $VERSION/$CHANNEL/$DOWNLOAD_URL
# from the environment and flags were already parsed at the top level)
do_install
构建dockerfile
- harbor新建infra私有项目
[root@ceshi-132 jenkins]# docker build . -t harbor.od.com/infra/jenkins:v2.222.4
[root@ceshi-132 jenkins]# docker push harbor.od.com/infra/jenkins:v2.222.4
创建名称空间
节点 130
[root@ceshi-130 bin]# kubectl create ns infra
namespace/infra created
授权集群拉取harbor私有项目
[root@ceshi-130 bin]# kubectl create secret docker-registry harbor --docker-server=harbor.od.com --docker-username=admin --docker-password=12345 -n infra
secret/harbor created
类型:docker-registry
名称: harbor
私仓库名称: infra
下载nfs
节点 130 131 132
[root@ceshi-130 bin]# yum install nfs-utils -y
配置nfs服务端
节点 132
[root@ceshi-132 ~]# vi /etc/exports
/data/nfsvolume 192.168.108.0/24(rw,no_root_squash)
[root@ceshi-132 ~]# mkdir -p /data/nfsvolume/jenkins_home
[root@ceshi-132 ~]# systemctl start nfs
[root@ceshi-132 ~]# systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
创建jenkins资源清单
[root@ceshi-132 k8s-yaml]# mkdir jenkins
[root@ceshi-132 k8s-yaml]# cd jenkins/
[root@ceshi-132 jenkins]# cat dp.yaml
# Jenkins Deployment.
# NOTE: the original notes carried inline Chinese annotations after the
# values, which is not valid YAML; they are preserved here as comments.
kind: Deployment
apiVersion: extensions/v1beta1   # deprecated API; use apps/v1 on k8s >= 1.16
metadata:
  name: jenkins
  namespace: infra
  labels:
    name: jenkins
spec:
  replicas: 1
  selector:
    matchLabels:
      name: jenkins
  template:
    metadata:
      labels:
        app: jenkins
        name: jenkins
    spec:
      volumes:
        - name: data
          nfs:                                 # jenkins home persisted on NFS
            server: ceshi-132.host.com
            path: /data/nfsvolume/jenkins_home
        - name: docker
          hostPath:                            # host docker daemon socket
            path: /run/docker.sock
            type: ''
      containers:
        - name: jenkins
          image: harbor.od.com/infra/jenkins:v2.222.4
          imagePullPolicy: IfNotPresent        # pull only when absent locally
          ports:
            - containerPort: 8080              # container port
              protocol: TCP
          env:
            - name: JAVA_OPTS
              value: -Xmx512m -Xms512m         # min/max heap of 512m
          volumeMounts:
            - name: data
              mountPath: /var/jenkins_home
            - name: docker
              # Mounting the host's docker socket lets docker commands run
              # inside the Jenkins container act on the host docker daemon.
              mountPath: /run/docker.sock
      imagePullSecrets:                        # required for the private registry
        - name: harbor                         # secret created via kubectl
      securityContext:
        runAsUser: 0                           # start as root
  strategy:
    type: RollingUpdate                        # default rolling upgrade
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600                 # rollout timeout in seconds
[root@ceshi-132 jenkins]# cat svc.yaml
# Jenkins Service (inline annotations converted to YAML comments).
kind: Service
apiVersion: v1
metadata:
  name: jenkins
  namespace: infra
spec:
  ports:
    - protocol: TCP
      port: 80           # cluster (service) port
      targetPort: 8080   # container port
  selector:
    app: jenkins
[root@ceshi-132 jenkins]# cat ingress.yaml
# Ingress routing jenkins.od.com -> jenkins service port 80.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: jenkins
  namespace: infra
spec:
  rules:
    - host: jenkins.od.com
      http:
        paths:
          - path: /
            backend:
              serviceName: jenkins
              servicePort: 80
构建pods
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/jenkins/dp.yaml
deployment.extensions/jenkins created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/jenkins/svc.yaml
service/jenkins created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/jenkins/ingress.yaml
ingress.extensions/jenkins created
验证pod
节点 130 131
- 测试git前提必须在镜像打包之前将公钥上传至gitee安全设置的SSH公钥(使用SSH公钥可以让你在你的电脑和 Gitee 通讯的时候使用安全连接)
[root@ceshi-130 ~]# kubectl get pod -n infra
NAME READY STATUS RESTARTS AGE
jenkins-698b4994c8-hm5wf 1/1 Running 0 5h21m
[root@ceshi-130 ~]# kubectl exec -it jenkins-698b4994c8-hm5wf bash -n infra
root@jenkins-698b4994c8-hm5wf:/# whoami
root
root@jenkins-698b4994c8-hm5wf:/# date
Wed Aug 18 16:33:53 CST 2021
测试以ssh连通性
root@jenkins-698b4994c8-hm5wf:/# ssh -i /root/.ssh/id_rsa -T git@gitee.com
Hi 刘江旭! You've successfully authenticated, but GITEE.COM does not provide shell access.
测试harbor仓库连通性
root@jenkins-698b4994c8-hm5wf:/# docker login harbor.od.com
Authenticating with existing credentials...
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Login Succeeded
部署maven
节点 132
传送门:maven:3.6.2
配置maven
[root@ceshi-132 ~]# mkdir /data/nfsvolume/jenkins_home/maven-3.6.2-8u242
[root@ceshi-132 ~]# tar xf apache-maven-3.6.2-bin.tar.gz
[root@ceshi-132 ~]# mv apache-maven-3.6.2/* /data/nfsvolume/jenkins_home/maven-3.6.2-8u242/
[root@ceshi-132 ~]# vi /data/nfsvolume/jenkins_home/maven-3.6.2-8u242/conf/settings.xml
<!-- Route all repository lookups (mirrorOf "*") through the Aliyun Nexus
     mirror — much faster than Maven Central from inside China. -->
<mirror>
  <id>nexus-aliyun</id>
  <mirrorOf>*</mirrorOf>
  <name>Nexus aliyun</name>
  <url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
下载java运行所需镜像
[root@ceshi-132 ~]# docker pull stanleyws/jre8:8u112
[root@ceshi-132 ~]# docker tag fa3a085d6ef1 harbor.od.com/public/jre8:8u112
[root@ceshi-132 ~]# docker push harbor.od.com/public/jre8:8u112
The push refers to repository [harbor.od.com/public/jre8]
[root@ceshi-132 ~]# mkdir /data/dockerfile/jre8
[root@ceshi-132 jre8]# vi Dockerfile
# Application base image for java services.
# NOTE: the original notes had bare annotation lines between instructions,
# which are not valid Dockerfile syntax; they are kept here as comments.
FROM harbor.od.com/public/jre8:8u112
# Set the container timezone to Asia/Shanghai
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime &&\
    echo 'Asia/Shanghai' >/etc/timezone
# Prometheus JMX exporter rules file
ADD config.yml /opt/prom/config.yml
# JMX javaagent that exposes JVM metrics
ADD jmx_javaagent-0.3.1.jar /opt/prom/
# Application jars are placed here by the CI-generated Dockerfile
WORKDIR /opt/project_dir
# Default startup script run by docker
ADD entrypoint.sh /entrypoint.sh
CMD ["/entrypoint.sh"]
[root@ceshi-132 jre8]# wget https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.3.1/jmx_prometheus_javaagent-0.3.1.jar -O jmx_javaagent-0.3.1.jar
[root@ceshi-132 jre8]# vi config.yml
---
# Prometheus JMX exporter rules: the match-all pattern exposes every metric.
rules:
- pattern: '.*'
[root@ceshi-132 jre8]# vi entrypoint.sh
#!/bin/sh
# Container entrypoint: start the application jar with the Prometheus JMX
# exporter javaagent attached.
#   M_PORT   - metrics port for the JMX exporter (defaults to 12346)
#   C_OPTS   - extra JVM/application options supplied by the deployment env
#   JAR_BALL - jar file name to run (set via the pod's JAR_BALL env var)
M_OPTS="-Duser.timezone=Asia/Shanghai -javaagent:/opt/prom/jmx_javaagent-0.3.1.jar=$(hostname -i):${M_PORT:-"12346"}:/opt/prom/config.yml"
C_OPTS=${C_OPTS}
JAR_BALL=${JAR_BALL}
# The variables stay unquoted on purpose: each may expand to several
# arguments. exec replaces the shell so java runs as PID 1.
exec java -jar ${M_OPTS} ${C_OPTS} ${JAR_BALL}
构建dockerfile
[root@ceshi-132 jre8]# docker build . -t harbor.od.com/base/jre8:8u112
Successfully built 7f36e75aac28
Successfully tagged harbor.od.com/base/jre8:8u112
[root@ceshi-132 jre8]# docker push harbor.od.com/base/jre8:8u112
8d4d1ab5ff74: Mounted from public/jre8
8u112: digest: sha256:72d4bd870605ae17f9f23e5cb9c453c34906d7ff86ce97c0c2ef89b68c1dcb6f size: 2405
下载Jenkins插件
Blue Ocean
Jenkins新建流水项目
New Item》pipeline》Configure》Discard old builds
New Item》pipeline》Configure》This project is parameterized
-
Add Parameter -> String Parameter
Name : app_name
Default Value :
Description : 项目名称 -
Add Parameter -> String Parameter
Name : image_name
Default Value :
Description : 镜像名称 -
Add Parameter -> String Parameter
Name : git_repo
Default Value :
Description : 项目所在git*仓库地址 -
Add Parameter -> String Parameter
Name : git_ver
Default Value :
Description : 项目在git*仓库所对应的分支或者版本号 -
Add Parameter -> String Parameter
Name : add_tag
Default Value :
Description : docker镜像标签时间部分 -
Add Parameter -> String Parameter
Name : mvn_dir
Default Value : ./
Description : 编译项目目录路径 -
Add Parameter -> String Parameter
Name : target_dir
Default Value : ./target
Description : 项目编译完成项目后产生的war/jar的目录 -
Add Parameter -> String Parameter
Name : mvn_cmd
Default Value : mvn clean package -Dmaven.test.skip=true
Description : 执行编译所用的命令 -
Add Parameter -> Choice Parameter
Name : base_image
Default Value :
base/jre7:7u80
base/jre8:8u112
Description : 项目使用底包做镜像 -
Add Parameter -> Choice Parameter
Name : maven
Default Value :
3.6.2-8u242
Description : 执行编译使用maven软件版本
Pipeline》
// Declarative pipeline: clone -> mvn build -> collect jars -> docker build/push.
// All inputs come from the string/choice parameters defined on the job.
pipeline {
  agent any
  stages {
    stage('pull') { //get project code from repo
      steps {
        // Each build clones into <app_name>/<build number> so runs don't collide.
        sh "git clone ${params.git_repo} ${params.app_name}/${env.BUILD_NUMBER} && cd ${params.app_name}/${env.BUILD_NUMBER} && git checkout ${params.git_ver}"
      }
    }
    stage('build') { //exec mvn cmd
      steps {
        // ${params.maven} must match a maven-<version> directory under /var/jenkins_home.
        sh "cd ${params.app_name}/${env.BUILD_NUMBER} && /var/jenkins_home/maven-${params.maven}/bin/${params.mvn_cmd}"
      }
    }
    stage('package') { //move jar file into project_dir
      steps {
        sh "cd ${params.app_name}/${env.BUILD_NUMBER} && cd ${params.target_dir} && mkdir project_dir && mv *.jar ./project_dir"
      }
    }
    stage('image') { //build image and push to registry
      steps {
        // Generated Dockerfile: chosen base image + built jars in /opt/project_dir
        // (the ADD line must stay at column 0 inside the here-string).
        writeFile file: "${params.app_name}/${env.BUILD_NUMBER}/Dockerfile", text: """FROM harbor.od.com/${params.base_image}
ADD ${params.target_dir}/project_dir /opt/project_dir"""
        sh "cd ${params.app_name}/${env.BUILD_NUMBER} && docker build -t harbor.od.com/${params.image_name}:${params.git_ver}_${params.add_tag} . && docker push harbor.od.com/${params.image_name}:${params.git_ver}_${params.add_tag}"
      }
    }
  }
}
构建项目
编辑资源配置清单
节点 132
[root@ceshi-132 ~]# cd /data/k8s-yaml/
[root@ceshi-132 k8s-yaml]# mkdir dubbo-demo-service
[root@ceshi-132 k8s-yaml]# cd dubbo-demo-service/
[root@ceshi-132 dubbo-demo-service]# vi dp.yaml
# Deployment for the dubbo service provider.
# NOTE: the original notes carried inline Chinese annotations after the
# values, which is not valid YAML; they are preserved here as comments.
kind: Deployment                    # resource kind (Pod/Deployment/StatefulSet/...)
apiVersion: extensions/v1beta1      # API version (deprecated; apps/v1 on k8s >= 1.16)
metadata:                           # resource name, namespace and labels
  name: dubbo-demo-service
  namespace: app
  labels:
    name: dubbo-demo-service
spec:                               # desired state of the Deployment
  replicas: 1                       # number of pod replicas
  selector:                         # which pods this controller manages
    matchLabels:
      name: dubbo-demo-service
  template:                         # pod template used when replicas are missing
    metadata:
      labels:
        app: dubbo-demo-service
        name: dubbo-demo-service
    spec:
      containers:
        - name: dubbo-demo-service
          image: harbor.od.com/app/dubbo-demo-service:master_20210825_1139
          ports:
            - containerPort: 20880  # dubbo protocol port
              protocol: TCP
          env:
            - name: JAR_BALL        # jar executed by the image entrypoint
              value: dubbo-server.jar
          imagePullPolicy: IfNotPresent   # use the local image when present
      imagePullSecrets:             # required to pull from the private registry
        - name: harbor              # name of the docker-registry secret
      restartPolicy: Always         # restart containers whenever they stop
      terminationGracePeriodSeconds: 30   # grace period before force-kill
      securityContext:              # pod processes run as UID 0
        runAsUser: 0
      schedulerName: default-scheduler    # default scheduler
  strategy:                         # how existing pods are replaced
    type: RollingUpdate             # parameters below only apply to RollingUpdate
    rollingUpdate:
      maxUnavailable: 1             # max pods unavailable during the update
      maxSurge: 1                   # max extra pods created during the update
  revisionHistoryLimit: 7           # number of old revisions to keep
  progressDeadlineSeconds: 600      # rollout must progress within 600s
kubectl添加名称空间
[root@ceshi-130 ~]# kubectl create namespace app
namespace/app created
kubectl授权私有仓库镜像拉取权限
[root@ceshi-130 ~]# kubectl create secret docker-registry harbor --docker-server=harbor.od.com --docker-username=admin --docker-password=12345 -n app
secret/harbor created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-demo-service/dp.yaml
deployment.extensions/dubbo-demo-service created
dubbo-monitor工具
节点 132
[root@ceshi-132 ~]# unzip dubbo-monitor-master.zip
[root@ceshi-132 conf]# vi /root/dubbo-monitor/dubbo-monitor-simple/conf/dubbo_origin.properties
# Containers started by dubbo-monitor-simple: log4j, spring, registry, jetty.
dubbo.container=log4j,spring,registry,jetty
dubbo.application.name=simple-monitor
dubbo.application.owner=liujiangxu
# Registry address: zk1 as primary, zk2/zk3 listed as backups.
dubbo.registry.address=zookeeper://zk1.od.com:2181?backup=zk2.od.com:2181,zk3.od.com:2181
dubbo.protocol.port=20880
# Embedded jetty web console port (exposed by the Service/Ingress).
dubbo.jetty.port=8080
dubbo.jetty.directory=/dubbo-monitor-simple/monitor
dubbo.charts.directory=/dubbo-monitor-simple/charts
dubbo.statistics.directory=/dubbo-monitor-simple/statistics
dubbo.log4j.file=logs/dubbo-monitor-simple.log
dubbo.log4j.level=WARN
构建dockerfile
[root@ceshi-132 ~]# cp -a dubbo-monitor /data/dockerfile/
[root@ceshi-132 ~]# cd /data/dockerfile/dubbo-monitor
[root@ceshi-132 dubbo-monitor]# docker build . -t harbor.od.com/infra/dubbo-monitor:latest
[root@ceshi-132 dubbo-monitor]# docker push harbor.od.com/infra/dubbo-monitor:latest
交付k8s配置资源清单
[root@ceshi-132 dubbo-monitor]# cat dp.yaml
# Deployment for the dubbo-monitor console.
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: dubbo-monitor
  namespace: infra
  labels:
    name: dubbo-monitor
spec:
  replicas: 1
  selector:
    matchLabels:
      name: dubbo-monitor
  template:
    metadata:
      labels:
        app: dubbo-monitor
        name: dubbo-monitor
    spec:
      containers:
        - name: dubbo-monitor
          image: harbor.od.com/infra/dubbo-monitor:latest
          ports:
            - containerPort: 8080    # jetty web console (dubbo.jetty.port)
              protocol: TCP
            - containerPort: 20880   # dubbo protocol port (dubbo.protocol.port)
              protocol: TCP
          imagePullPolicy: IfNotPresent
      imagePullSecrets:
        - name: harbor
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      securityContext:
        runAsUser: 0
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600
[root@ceshi-132 dubbo-monitor]# cat svc.yaml
# dubbo-monitor Service (inline annotations converted to YAML comments).
kind: Service
apiVersion: v1
metadata:
  name: dubbo-monitor
  namespace: infra
spec:
  ports:
    - protocol: TCP
      # Cluster (service) port; any free port would work, kept equal to the
      # container port for clarity.
      port: 8080
      targetPort: 8080   # container port
  selector:
    app: dubbo-monitor
[root@ceshi-132 dubbo-monitor]# cat ingress.yaml
# Ingress routing dubbo-monitor.od.com -> dubbo-monitor service.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: dubbo-monitor
  namespace: infra
spec:
  rules:
    - host: dubbo-monitor.od.com
      http:
        paths:
          - path: /
            backend:
              serviceName: dubbo-monitor
              servicePort: 8080   # must match "port" in svc.yaml
DNS解析ingress配置中 host域名
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-monitor/dp.yaml
deployment.extensions/dubbo-monitor created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-monitor/svc.yaml
service/dubbo-monitor created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-monitor/ingress.yaml
ingress.extensions/dubbo-monitor created
[root@ceshi-130 ~]# kubectl get pods -n infra -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
dubbo-monitor-5bb45c8b97-fnwt7 1/1 Running 0 6m59s 172.7.21.9 ceshi-130.host.com <none> <none>
jenkins-698b4994c8-hm5wf 1/1 Running 0 7d6h 172.7.22.9 ceshi-131.host.com <none> <none>
交付dubbo服务消费者
编辑资源配置清单
节点 132
[root@ceshi-132 ~]# cd /data/k8s-yaml/
[root@ceshi-132 k8s-yaml]# mkdir dubbo-demo-web
[root@ceshi-132 k8s-yaml]# cd dubbo-demo-web/
[root@ceshi-132 dubbo-demo-web]# vi dp.yaml
# Deployment for the dubbo consumer (web) side.
# NOTE: the original notes carried inline Chinese annotations after the
# values — in the labels section the annotation was even fused onto the
# value ("dubbo-demo-consumer给自己打标签"), which would corrupt the labels.
# The annotations are preserved here as real YAML comments.
kind: Deployment                    # resource kind (Pod/Deployment/StatefulSet/...)
apiVersion: extensions/v1beta1      # API version (deprecated; apps/v1 on k8s >= 1.16)
metadata:                           # resource name, namespace and labels
  name: dubbo-demo-consumer
  namespace: app
  labels:
    name: dubbo-demo-consumer
spec:                               # desired state of the Deployment
  replicas: 1                       # number of pod replicas
  selector:                         # which pods this controller manages
    matchLabels:
      name: dubbo-demo-consumer
  template:                         # pod template used when replicas are missing
    metadata:
      labels:
        app: dubbo-demo-consumer
        name: dubbo-demo-consumer
    spec:
      containers:
        - name: dubbo-demo-consumer
          image: harbor.od.com/app/dubbo-demo-consumer:master_20210826_1040
          ports:
            - containerPort: 8080
              protocol: TCP
            - containerPort: 20880
              protocol: TCP
          env:
            - name: JAR_BALL        # jar executed by the image entrypoint
              value: dubbo-client.jar
          imagePullPolicy: IfNotPresent   # use the local image when present
      imagePullSecrets:             # required to pull from the private registry
        - name: harbor              # name of the docker-registry secret
      restartPolicy: Always         # restart containers whenever they stop
      terminationGracePeriodSeconds: 30   # grace period before force-kill
      securityContext:              # pod processes run as UID 0
        runAsUser: 0
      schedulerName: default-scheduler    # default scheduler
  strategy:                         # how existing pods are replaced
    type: RollingUpdate             # parameters below only apply to RollingUpdate
    rollingUpdate:
      maxUnavailable: 1             # max pods unavailable during the update
      maxSurge: 1                   # max extra pods created during the update
  revisionHistoryLimit: 7           # number of old revisions to keep
  progressDeadlineSeconds: 600      # rollout must progress within 600s
[root@ceshi-132 dubbo-demo-web]# vi svc.yaml
# Service exposing the dubbo consumer's HTTP port inside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: dubbo-demo-consumer
  namespace: app
spec:
  ports:
    - protocol: TCP
      port: 8080         # cluster (service) port
      targetPort: 8080   # container port
  selector:
    app: dubbo-demo-consumer
[root@ceshi-132 dubbo-demo-web]# vi ingress.yaml
# Ingress routing demo.od.com -> dubbo-demo-consumer service port 8080.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: dubbo-demo-consumer
  namespace: app
spec:
  rules:
    - host: demo.od.com
      http:
        paths:
          - path: /
            backend:
              serviceName: dubbo-demo-consumer
              servicePort: 8080
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-demo-web/dp.yaml
deployment.extensions/dubbo-demo-consumer created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-demo-web/svc.yaml
service/dubbo-demo-consumer created
[root@ceshi-130 ~]# kubectl apply -f http://k8s-yaml.od.com/dubbo-demo-web/ingress.yaml
ingress.extensions/dubbo-demo-consumer created