Jenkins-based CI/CD Design and Release Workflow

1. Release Workflow Design

  • Pull the code;
  • Compile; most Java microservices are packaged as jar files;
  • Copy the jar into a base image according to the business logic (see the Dockerfile sketch after this list);
  • Deploy to the Kubernetes platform by writing YAML manifests;
  • Expose the service;
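
To make the image-build step concrete, here is a minimal Dockerfile sketch. The base image, jar path, service name, and port are assumptions for illustration, not values taken from the actual project:

# Dockerfile (illustrative sketch)
FROM openjdk:8-jre-alpine                      # assumed Java 8 runtime base image
COPY target/order-service.jar /app.jar         # copy the compiled jar into the image
EXPOSE 8080                                    # assumed service port
CMD ["java", "-jar", "/app.jar"]

The resulting image would then be tagged with the Harbor registry address (for example 192.168.171.10/library/order-service:v1) and pushed, so the Kubernetes manifests can reference it.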

2. Prepare the Base Environment

  • Kubernetes (Ingress Controller, CoreDNS, automatic PV provisioning)
  • Helm v3
  • GitLab
  • Harbor, with the Chart storage feature enabled
  • MySQL (databases for the microservices)

1.1 Deploy GitLab in a container

# mkdir gitlab  # this directory is used below for data persistence; if GitLab or Docker ever breaks, copy it to another machine and re-run the start command below
# cd gitlab
# docker run -d \
  --name gitlab \
  -p 8443:443 \
  -p 9999:80 \
  -p 9998:22 \
  -v $PWD/config:/etc/gitlab \
  -v $PWD/logs:/var/log/gitlab \
  -v $PWD/data:/var/opt/gitlab \
  -v /etc/localtime:/etc/localtime \
  zhdya/gitlab-ce-zh:latest
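
GitLab takes a few minutes to initialize on first start. An optional quick check before opening the web UI:

# docker ps -f name=gitlab     # STATUS should show Up (and eventually healthy if the image defines a health check)
# docker logs -f gitlab        # or follow the startup logs; Ctrl-C to stop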
 
Official image: gitlab/gitlab-ce:latest

Access URL: http://IP:9999

On first access you will be asked to set the administrator password; then log in with the default admin username root and the password you just set.

1.2 Create a project and commit the test code

Branch layout:

  • dev1   delivered source code

  • dev2   Dockerfile added to build images

  • dev3   Kubernetes resource manifests

  • dev4   distributed tracing added for the microservices

  • master final release branch

Pull the dev3 branch and push it to the private code repository:

git clone http://192.168.171.10:9999/root/microservice.git
cp -rf simple-microservice/* microservice
cd microservice
git add .
git config --global user.email "you@example.com"
git config --global user.name "Your Name"
git commit -m 'all'
git push origin master

2.1 Install Docker and docker-compose

# wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# yum install docker-ce -y
# systemctl start docker
# systemctl enable docker
# curl -L https://github.com/docker/compose/releases/download/1.25.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
# chmod +x /usr/local/bin/docker-compose

2.2 Deploy Harbor from the offline installer

# tar zxvf harbor-offline-installer-v1.9.1.tgz
# cd harbor
# vi harbor.yml
hostname: 192.168.171.10
# ./prepare
# ./install.sh --with-chartmuseum
# docker-compose ps

The --with-chartmuseum flag enables the Chart storage feature.
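
An optional sanity check, assuming Harbor's default admin credentials and the built-in library project, is to query the chart repository API; an empty JSON list means ChartMuseum is up but holds no charts yet:

# curl -u admin:Harbor12345 http://192.168.171.10/api/chartrepo/library/charts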

2.3 Configure Docker to trust the registry

Since Harbor is not served over HTTPS, Docker also needs to be configured to trust it as an insecure registry.

# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["http://f1361db2.m.daocloud.io"],
  "insecure-registries": ["192.168.171.10"]
}
# systemctl restart docker
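
With the insecure-registries entry in place, a quick optional smoke test confirms that Docker can log in to and push to Harbor; the library project and busybox tag are used here only as an example:

# docker login 192.168.171.10 -u admin -p Harbor12345
# docker pull busybox
# docker tag busybox 192.168.171.10/library/busybox:test
# docker push 192.168.171.10/library/busybox:test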

3.1 The Helm package manager & installing the Helm client

# wget https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz
# tar zxvf helm-v3.0.0-linux-amd64.tar.gz 
# mv linux-amd64/helm /usr/bin/
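
Helm v3 is a standalone client with no server-side Tiller component, so checking the version is enough to confirm the installation:

# helm version     # should report Version:"v3.0.0"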

3.2 Configure China-mirror Chart repositories

# helm repo add stable http://mirror.azure.cn/kubernetes/charts
# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts 
# helm repo list

3.3 Install the push plugin

# helm plugin install https://github.com/chartmuseum/helm-push

If the download fails due to network issues, you can also unpack the offline package included in the course materials:

# tar zxvf helm-push_0.7.1_linux_amd64.tar.gz
# mkdir -p /root/.local/share/helm/plugins/helm-push
# chmod +x bin/*
# mv bin plugin.yaml /root/.local/share/helm/plugins/helm-push

3.4 Add the repo

# helm repo add  --username admin --password Harbor12345 myrepo http://192.168.171.10/chartrepo/library

3.5 Push and install a Chart

# helm push mysql-1.4.0.tgz --username=admin --password=Harbor12345 http://192.168.171.10/chartrepo/library
# helm install web --version 1.4.0 myrepo/mysql
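
Note: a chart pushed to Harbor does not appear in the local cache until the repository index is refreshed; if the install above cannot find the chart, update the repo index first:

# helm repo update
# helm search repo myrepo     # the pushed chart should now be listed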

4. MySQL for the microservice databases

# yum install mariadb-server -y
# systemctl enable --now mariadb
# mysqladmin -uroot password '123456'

Or create it with Docker:

docker run -d --name db -p 3306:3306 -v /opt/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7 --character-set-server=utf8

Finally, import the microservice databases.
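
A hedged example of the import step; the dump file name below is a placeholder for whatever SQL files ship with the microservice project:

# mysql -uroot -p123456 < db_order.sql        # repeat for each service's dump file
# mysql -uroot -p123456 -e 'show databases;'  # confirm the schemas were created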

5. Automatic PV provisioning in K8S

Here is the setup for the automatic PV provisioning listed in the prerequisites above.

First prepare an NFS server to provide storage for Kubernetes.

# yum install -y nfs-utils
# cat /etc/exports
/opt/sharedata 192.168.171.0/24(rw,sync,insecure,no_subtree_check,no_root_squash)

# mkdir -p /opt/sharedata
# systemctl enable nfs
# systemctl start nfs
# service nfs status
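
An optional check that the export is visible; 192.168.171.12 is assumed to be the NFS server address (the same one referenced in deployment.yaml below):

# exportfs -r                     # re-read /etc/exports
# showmount -e 192.168.171.12     # /opt/sharedata should be listed
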
Also install nfs-utils on every Node, since it is required when the NFS volumes are mounted.

Kubernetes has no built-in dynamic provisioner for NFS, so the nfs-client-provisioner plugin must be installed first:

[root@k8s-master1 ~]# cd nfs-client
[root@k8s-master1 nfs-client]# ls
class.yaml  deployment.yaml  rbac.yaml
[root@k8s-master1 nfs-client]# vim deployment.yaml    # update the NFS server address and the shared directory path
[root@k8s-master1 nfs-client]# cat deployment.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          #image: quay.io/external_storage/nfs-client-provisioner:latest
          image: registry.cn-hangzhou.aliyuncs.com/cdw/nfs-client-provisioner
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.171.12
            - name: NFS_PATH
              value: /opt/sharedata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.171.12
            path: /opt/sharedata

# cat class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage # StorageClass name; PVCs reference this name when requesting a PV
provisioner: fuseim.pri/ifs # or choose another name; must match the PROVISIONER_NAME env in the deployment
parameters:
  archiveOnDelete: "true"


# kubectl apply -f .
[root@k8s-master1 nfs-client]# kubectl get po
NAME                                     READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-f9fdd5cc9-m2f62   1/1     Running   0          14s
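
With the provisioner running, any PVC that references the managed-nfs-storage class should get a PV created for it automatically. A minimal test claim (the name and size below are arbitrary):

# cat test-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi

# kubectl apply -f test-pvc.yaml
# kubectl get pvc test-pvc        # STATUS should change to Bound once the PV is provisioned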