DevOps (a portmanteau of Development and Operations) is the collective name for a set of processes, methods, and systems used to promote communication, collaboration, and integration between development (application/software engineering), technology operations, and quality assurance (QA) teams.
No. | Hostname | IP address | Role | OS version | Hardware |
---|---|---|---|---|---|
1 | k8s-master | 192.168.134.110 | Master (control plane) | CentOS 7 | 2 CPU / 8 GB RAM / 30 GB disk |
2 | k8s-node1 | 192.168.134.111 | Worker node | CentOS 7 | 2 CPU / 8 GB RAM / 30 GB disk |
3 | k8s-node2 | 192.168.134.112 | Worker node | CentOS 7 | 2 CPU / 8 GB RAM / 30 GB disk |
hostnamectl set-hostname k8s-master   # run on the master node
hostnamectl set-hostname k8s-node1    # run on node1
hostnamectl set-hostname k8s-node2    # run on node2
cat >> /etc/hosts << EOF
192.168.134.110 k8s-master
192.168.134.111 k8s-node1
192.168.134.112 k8s-node2
EOF
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git lrzsz
setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
Kubernetes requires that swap be disabled:
swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
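A quick way to confirm swap is really off before moving on (optional check):
# both commands should report no active swap
free -h
cat /proc/swaps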
# Install the package
yum -y install ntpdate
# Set up a scheduled time-sync cron job
crontab -e
0 */1 * * * ntpdate time1.aliyun.com
Enable IPv4 forwarding and let iptables see bridged traffic:
cat <<EOF > /etc/sysctl.d/kubernetes.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 131072
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
# Apply the settings
sysctl --system
# Applies the sysctl parameters without rebooting
# If the command above reports errors, try loading the module below first, then re-run sysctl --system
modprobe bridge
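To verify that the key parameters actually took effect, a quick check (assuming the config above was applied):
# both values should print 1
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables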
In Kubernetes, a Service can be proxied in one of two modes: iptables or IPVS. Compared with iptables, IPVS supports more flexible load-balancing algorithms and has built-in health checking. To use IPVS mode, the IPVS kernel modules must be loaded manually.
# Load the module
modprobe br_netfilter
# Create /etc/modules-load.d/ipvs.conf with the following content:
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack # on kernels older than 4.18, change this line to nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
# Load the kernel modules required by containerd
cat > /etc/modules-load.d/kubernetes.conf << EOF
overlay
br_netfilter
EOF
# Then enable the service so these modules are loaded automatically
systemctl enable --now systemd-modules-load.service
# After configuring the kernel settings on all nodes, reboot to make sure the modules are still loaded after a restart
reboot
# After the reboot, check that the IPVS modules are loaded:
lsmod | grep --color=auto -e ip_vs -e nf_conntrack
# After the reboot, check the containerd-related modules:
lsmod | egrep 'br_netfilter|overlay'
yum install -y yum-utils
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Pick whichever version you need
yum -y install docker-ce-20.10.* docker-ce-cli-20.10.* containerd.io
# Kubernetes officially recommends that Docker and other runtimes use systemd as the cgroup driver, otherwise kubelet will not start
mkdir /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://ud6340vz.mirror.aliyuncs.com"]
}
EOF
Or:
tee /etc/docker/daemon.json <<-'EOF'
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://docker.rainbond.cc",
"https://axvfsf7e.mirror.aliyuncs.com",
"https://ud6340vz.mirror.aliyuncs.com"
]
}
EOF
systemctl daemon-reload
systemctl enable docker && systemctl start docker
# Check the status
systemctl status docker
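To double-check that Docker picked up the systemd cgroup driver and the mirror settings, something like the following can be used:
# should print: systemd
docker info --format '{{.CgroupDriver}}'
# should list the registry mirrors configured in daemon.json
docker info | grep -A 3 'Registry Mirrors'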
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install a specific version
yum install -y kubelet-1.20.10 kubectl-1.20.10 kubeadm-1.20.10
# Enable and start kubelet
systemctl enable kubelet && systemctl start kubelet
# Generate the default configuration file
kubeadm config print init-defaults > kubeadm-config.yaml
# Edit the configuration file
Make the following changes:
# Line 12: set the control-plane (master) node IP address
advertiseAddress: 192.168.134.110
# Line 16: set the node name
name: k8s-master
# Line 32: change the image repository to a domestic mirror
imageRepository: registry.aliyuncs.com/google_containers
# Line 34: set the version
kubernetesVersion: v1.20.0
# Line 38: add the pod network CIDR
podSubnet: 10.244.0.0/16
Append the following at the end of the file:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
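Before running init, the control-plane images can be pre-pulled with the same config file to surface registry or version problems early (optional):
# pre-pull the images referenced by kubeadm-config.yaml
kubeadm config images pull --config kubeadm-config.yaml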
# Initialize the cluster
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
Following the hints in the init log, run the commands below so that kubectl can access the cluster:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Copy the join command (with the token) printed on the master and run it on each worker node to add the node to the cluster
# If you cannot find the join command, regenerate it with: kubeadm token create --print-join-command
kubeadm join 192.168.134.110:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:e21b9ed65344507f4f38eddeca3e3472fd825d92d1dc84a47136a4b6c5b04f9c
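Once the cluster is up, kube-proxy should be running in IPVS mode; a rough way to confirm (the exact log wording may vary by version):
# the kube-proxy logs should mention the ipvs proxier, and ipvsadm should list virtual servers
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs
ipvsadm -Ln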
# Remove a worker node (if needed)
kubectl delete nodes k8s-node2
Check the nodes:
kubectl get nodes
You will see that the node STATUS is NotReady; this is because no network plugin has been installed yet. There are many Kubernetes network plugins, such as flannel and calico (look up the differences yourself); this guide uses Calico. A network plugin must be installed, otherwise the nodes stay NotReady. Run the following on the master node.
# Download the Calico manifest
curl -O https://docs.tigera.io/archive/v3.18/manifests/calico.yaml
# Edit the file: vim calico.yaml
# Around lines 3672-3673, set the pod CIDR:
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
# Around lines 3688-3689, add the interface auto-detection method (use your own NIC name):
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens32"
# Apply the manifest
kubectl apply -f calico.yaml
Check the Calico rollout progress:
kubectl get pod -n kube-system
# If a pod has problems, inspect it with describe
kubectl describe pod calico-node-c7v7q -n kube-system
Official docs: https://kubernetes.github.io/ingress-nginx/
Install ingress-nginx using Helm.
First install Helm; Helm is a package manager for Kubernetes, much like yum repositories on CentOS.
# Download Helm
wget https://get.helm.sh/helm-v3.2.3-linux-amd64.tar.gz
# Extract
tar -xf helm-v3.2.3-linux-amd64.tar.gz
# Enter the extracted directory
cd linux-amd64
# Move the binary to /usr/local/bin/
mv helm /usr/local/bin/
# Check the version
helm version
# Add the ingress-nginx Helm repository
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
# List the repositories
helm repo list
# Search for ingress-nginx
helm search repo ingress-nginx
Download the ingress-nginx chart
# Pull the chart
helm pull ingress-nginx/ingress-nginx --version=4.2.5
# Extract the chart
tar -xf ingress-nginx-4.2.5.tgz
# Enter the extracted chart directory
cd ingress-nginx
Edit values.yaml as follows:
# Image registry: switch to a domestic mirror
registry: registry.cn-hangzhou.aliyuncs.com # two occurrences in total
image: google_containers/nginx-ingress-controller
image: google_containers/kube-webhook-certgen
# Comment out the digest checks
digest: sha256:549e71a6ca248c5abd51cdb73dbc3083df62cf92ed5e6147c780e30f7e007a47 # two occurrences in total
digestChroot: sha256:a8466b19c621bd550b1645e27a004a5cc85009c858a9ab19490216735ac432b1
# Add a node selector so the controller is only deployed on nodes labeled ingress=true
Change the controller's kind to DaemonSet
Find nodeSelector: and add ingress: "true"
# Use the host network
hostNetwork: true
# DNS policy
Change dnsPolicy from ClusterFirst (the default, cluster-first) to dnsPolicy: ClusterFirstWithHostNet (needed when using the host network)
Set admissionWebhooks.enabled to false
# If you are not on a cloud platform that provides load balancers
Change the Service type from LoadBalancer to ClusterIP (search for LoadBalancer)
Deploy ingress-nginx
# Create a namespace
kubectl create ns ingress-nginx
# Label the nodes that should run the ingress controller
kubectl label node k8s-master ingress=true
kubectl label node k8s-node1 ingress=true
# Install ingress-nginx
helm install ingress-nginx -n ingress-nginx .
Check whether the deployment succeeded:
kubectl get pod -n ingress-nginx -o wide
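A quick smoke test (assuming 192.168.134.110 is one of the nodes labeled ingress=true): because the controller uses the host network, port 80 on that node should answer with the controller's default backend until ingress rules are created.
# an HTTP 404 from nginx here means the controller is reachable
curl -i http://192.168.134.110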
GitLab is an open-source repository management system: a web service built on top of Git for source-code management.
# Install with docker-compose
# vim docker-compose.yml
version: '3.7'
services:
gitlab:
image: 'gitlab/gitlab-ce:14.1.0-ce.0'
container_name: gitlab
restart: always
ports:
- '8085:80'
- '8443:443'
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'http://192.168.134.111'
gitlab_rails['time_zone'] = 'Asia/Shanghai'
puma['worker_processes'] = 2
sidekiq['max_concurrency'] = 8
postgresql['shared_buffers'] = "128MB"
postgresql['max_worker_processes'] = 4
prometheus_monitoring['enable'] = false
volumes:
- '/usr/local/gitlab/etc:/etc/gitlab'
- '/usr/local/gitlab/log:/var/log/gitlab'
- '/usr/local/gitlab/data:/var/opt/gitlab'
# Start the container
docker-compose up -d
# Enter the container
docker exec -it gitlab bash
# Reconfigure and restart
gitlab-ctl reconfigure
gitlab-ctl restart
Open the web UI
http://ip:port
Look up the default root password:
cat /usr/local/gitlab/etc/initial_root_password
After logging in, change the default password: avatar in the top-right corner
> Preferences
> Password
Change system settings: click the hamburger menu in the top-left
> Admin
> Settings
> General
> Account and limit
> uncheck Gravatar enabled
> Save changes
Disable user sign-up: Settings
> General
> Sign-up restrictions
> uncheck Sign-up enabled
> Save changes
Allow webhooks to reach local addresses: Settings
> Network
> Outbound requests
> check Allow requests to the local network from web hooks and services
> Save changes
Set the instance-wide language to Simplified Chinese: Settings
> Preferences
> Localization
> Default language
> select Simplified Chinese
> Save changes
Set the current user's language to Simplified Chinese: avatar in the top-right corner
> Preferences
> Localization
> Language
> select Simplified Chinese
> Save changes
echo root > ./username
echo 12345678 > password
kubectl create secret generic git-user-pass --from-file=./username --from-file=./password -n kube-devops
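Note that the kube-devops namespace must exist before secrets can be created in it; if it has not been created yet, create it first:
# namespace used by the CI/CD resources in this guide (skip if it already exists)
kubectl create namespace kube-devops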
Create a project: click the hamburger menu in the top-left
> Projects
> Your projects
> New project
> fill in the project name and save
# Initialize the local git repository
git init
git remote add origin http://192.168.134.111:8085/devops/blog.git
git add .
git commit -m "initial version"
git push -u origin master
# Create a directory for the Harbor package
mkdir -p /opt/harbor && cd /opt/harbor
# Download
wget https://github.com/goharbor/harbor/releases/download/v1.10.10/harbor-online-installer-v1.10.10.tgz
# Extract
tar -zxf harbor-online-installer-v1.10.10.tgz
# Enter the extracted directory
cd /opt/harbor/harbor
vim harbor.yml
# harbor.yml configuration explained
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: the server's IP address or domain name
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
# https related config
# The https section below needs to be commented out
#https:
# https port for harbor, default is 443
# port: 443
# The path of cert and key files for nginx
# certificate: /your/certificate/path
# private_key: /your/private/key/path
./install.sh
# Check the images Harbor depends on and the services it started
docker-compose ps
# Access the UI
http://ip:port
Log in
> New Project
> enter the project name
> OK
# Create the Harbor registry secret (replace the values below with your own)
kubectl create secret docker-registry harbor-secret --docker-server=192.168.134.111:8086 --docker-username=admin --docker-password=123456 -n kube-devops
Add the Harbor registry to the Docker configuration on every node
# vim /etc/docker/daemon.json
Add the Harbor registry address and port (as an extra key inside the existing JSON object, separated by a comma):
"insecure-registries": ["192.168.134.111:8086"]
# Restart Docker after the change
systemctl restart docker
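A quick check that Docker now trusts the registry (assuming the Harbor admin account created above):
# should end with "Login Succeeded"
docker login -uadmin 192.168.134.111:8086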
NFS is used to share data between nodes.
# Install nfs
yum install nfs-utils -y
# Start nfs
systemctl start nfs-server
# Check the nfs version
cat /proc/fs/nfsd/versions
# Create the shared directory
mkdir -p /opt/data/k8s-nfs/
# Configure the export
vim /etc/exports
# rw = read-write, ro = read-only; 192.168.134.0/24 is the network allowed to access the share
/opt/data/k8s-nfs/ 192.168.134.0/24(rw,sync,no_subtree_check,no_root_squash)
# Reload the exports
exportfs -f
systemctl reload nfs-server
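To confirm the export is active before mounting from a client (optional):
# list the directories exported by the NFS server
showmount -e 192.168.134.110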
# On another node, install nfs-utils and test the mount; the client does not need to start the service
yum install nfs-utils -y
mkdir -p /opt/data/nfs
mount -t nfs 192.168.134.110:/opt/data/k8s-nfs /opt/data/nfs
# Unmount when done testing
umount /opt/data/nfs/
Download apache-maven-3.6.1-bin.tar.gz and sonar-scanner-cli-4.8.0.2856-linux.zip.
mkdir /opt/k8s/jenkins
# Maven is used to build the Java project
wget https://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz
# sonar-scanner is used for code analysis (optional)
wget https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.8.0.2856-linux.zip
Write the Dockerfile
# vim Dockerfile
FROM jenkins/jenkins:2.392-jdk11
ADD ./apache-maven-3.6.1-bin.tar.gz /usr/local/
USER root
WORKDIR /usr/local/
ENV MAVEN_HOME=/usr/local/apache-maven-3.6.1
ENV PATH=$JAVA_HOME/bin:$MAVEN_HOME/bin:$PATH
RUN echo "jenkins ALL=NOPASSWD: ALL" >> /etc/sudoers
USER jenkins
# Build the image
docker build -t 192.168.134.111:8086/devops/jenkins-maven:jdk-11 .
# Log in to Harbor
docker login -uadmin 192.168.134.111:8086
# Push the image to Harbor
docker push 192.168.134.111:8086/devops/jenkins-maven:jdk-11
mkdir -p /opt/k8s/jenkins/manifests && cd /opt/k8s/jenkins/manifests
# vim jenkins-deployment.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mvn-settings
namespace: kube-devops
labels:
app: jenkins-server
data:
settings.xml: |-
<?xml version="1.0" encoding="UTF-8"?>
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
<localRepository>/var/jenkins_home/maven-repo</localRepository>
<servers>
<server>
<id>releases</id>
<username>admin</username>
<password>wolfcode</password>
</server>
<server>
<id>snapshots</id>
<username>admin</username>
<password>wolfcode</password>
</server>
</servers>
<mirrors>
<mirror>
<id>nexus</id>
<name>aliyun</name>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
<mirrorOf>central</mirrorOf>
</mirror>
</mirrors>
<pluginGroups>
<pluginGroup>org.sonarsource.scanner.maven</pluginGroup>
</pluginGroups>
<profiles>
<profile>
<id>releases</id>
<activation>
<activeByDefault>true</activeByDefault>
<jdk>1.8</jdk>
</activation>
</profile>
</profiles>
</settings>
---
apiVersion: v1
kind: PersistentVolume # resource type: PersistentVolume
metadata:
  name: jenkins-pv
  namespace: kube-devops
spec:
  capacity: # capacity
    storage: 5Gi # size of this PV
  volumeMode: Filesystem # filesystem volume
  accessModes: # access modes: ReadWriteOnce, ReadWriteMany, ReadOnlyMany
    - ReadWriteOnce # can be mounted read-write by a single node
  persistentVolumeReclaimPolicy: Recycle # reclaim policy: Recycle / Delete / Retain
  storageClassName: jenkins-data # storage class name; must match the PVC
  nfs: # NFS backend
    path: /opt/data/k8s-nfs/ # export path
    server: 192.168.134.110 # NFS server address
---
apiVersion: v1
kind: PersistentVolumeClaim # resource type: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  namespace: kube-devops
spec:
  volumeMode: Filesystem # filesystem volume
  accessModes: # access modes: ReadWriteOnce, ReadWriteMany, ReadOnlyMany
    - ReadWriteOnce # must match the access mode of the PV
  resources: # minimum resources requested
    requests:
      storage: 3Gi # may be smaller than the PV, but not larger, or no PV will match
  storageClassName: jenkins-data # must match the storage class of the PV
---
apiVersion: v1
kind: Service
metadata:
name: jenkins-service
namespace: kube-devops
annotations:
prometheus.io/scrape: 'true'
prometheus.io/path: /
prometheus.io/port: '8080'
spec:
selector:
app: jenkins-server
type: NodePort
ports:
- port: 8080
targetPort: 8080
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: jenkins-admin
namespace: kube-devops
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: jenkins-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: jenkins-admin
namespace: kube-devops
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: jenkins
namespace: kube-devops
spec:
replicas: 1
selector:
matchLabels:
app: jenkins-server
template:
metadata:
labels:
app: jenkins-server
spec:
serviceAccountName: jenkins-admin
imagePullSecrets:
          - name: harbor-secret # secret used to pull images from Harbor
containers:
- name: jenkins
image: 192.168.134.111:8086/devops/jenkins-maven:jdk-11
imagePullPolicy: IfNotPresent
securityContext:
            privileged: true
            runAsUser: 0 # run the container as the root user
resources:
limits:
memory: "2Gi"
cpu: "1000m"
requests:
memory: "500Mi"
cpu: "500m"
ports:
- name: httpport
containerPort: 8080
- name: jnlpport
containerPort: 50000
livenessProbe:
httpGet:
path: "/login"
port: 8080
initialDelaySeconds: 90
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 5
readinessProbe:
httpGet:
path: "/login"
port: 8080
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
volumeMounts:
- name: jenkins-data
mountPath: /var/jenkins_home
- name: docker
mountPath: /run/docker.sock
- name: docker-home
mountPath: /usr/bin/docker
- name: mvn-setting
mountPath: /usr/local/apache-maven-3.6.1/conf/settings.xml
subPath: settings.xml
- name: daemon
mountPath: /etc/docker/daemon.json
subPath: daemon.json
- name: kubectl
mountPath: /usr/bin/kubectl
volumes:
- name: kubectl
hostPath:
path: /usr/bin/kubectl
- name: jenkins-data
persistentVolumeClaim:
claimName: jenkins-pvc
- name: docker
hostPath:
            path: /run/docker.sock # map the host's Docker socket into the container
- name: docker-home
hostPath:
path: /usr/bin/docker
- name: mvn-setting
configMap:
name: mvn-settings
items:
- key: settings.xml
path: settings.xml
- name: daemon
hostPath:
path: /etc/docker/
# From the jenkins directory, install Jenkins
kubectl apply -f jenkins-deployment.yaml
# Check whether the pod is running
kubectl get po -n kube-devops
# Check the service port, then open it in a browser
kubectl get svc -n kube-devops
# Check the container log to get the initial admin password
kubectl logs -f <pod-name> -n kube-devops
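If the password has already scrolled out of the log, it can usually also be read from the Jenkins home directory inside the pod (the pod name below is a placeholder):
# the initial admin password is written to JENKINS_HOME/secrets
kubectl exec -n kube-devops <pod-name> -- cat /var/jenkins_home/secrets/initialAdminPassword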
jenkins-ingress
#vim jenkins-ingress.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jenkins-ingress # custom ingress name
  namespace: kube-devops
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
    kubernetes.io/ingress.class: nginx # use the nginx ingress controller
spec:
  rules: # host rules
    - host: www.myjenkins.com # custom domain
      http:
        paths:
          - pathType: Prefix # path type
            path: "/" # site path
            backend: # backend to route to
              service: # the Service created above
                name: jenkins-service # must match the Service name
                port:
                  number: 8080 # Service port
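A minimal sketch of applying and testing the ingress, assuming 192.168.134.110 is a node labeled ingress=true:
# apply the ingress rule
kubectl apply -f jenkins-ingress.yml
# point the test domain at an ingress node, then open it in a browser or with curl
echo "192.168.134.110 www.myjenkins.com" >> /etc/hosts
curl -I http://www.myjenkins.com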
After logging in at http://ip:port, start by installing the suggested plugins; if some plugins cannot be found, download them from the official site https://plugins.jenkins.io/.
Then change the password: admin avatar in the top-right corner
> Configure
> Password
Then install any remaining plugins: Manage Jenkins
> Plugin Manager
Jenkins + k8s environment configuration
Go to Dashboard > Manage Jenkins > Manage Nodes and Clouds > Configure Clouds
Configure the k8s cluster
Name: kubernetes
Click Kubernetes Cloud details to continue
Kubernetes URL:
If Jenkins runs inside the k8s cluster, the in-cluster service address is enough:
https://kubernetes.default
If Jenkins is deployed outside the cluster, you must configure the external IP and the apiserver port (6443), as well as the server certificate
Jenkins URL:
If deployed inside the k8s cluster: http://jenkins-service.kube-devops
If outside: http://192.168.134.110:32479 (replace with your own address)
Save when done
Manage Jenkins > Manage Nodes > the gear icon on the far right of the master node in the list
Change the node label so it matches the agent > kubernetes > label value in the project's Jenkinsfile
GitLab credentials
Manage Jenkins
> Security
> Manage Credentials
> System
> Global credentials (unrestricted)
> Add Credentials
Scope: Global  Username: root  Password: 12345678  ID: gitlab-user-pass
Harbor credentials: admin/123456
ID: harbor-user-pass
The blog project is built with Spring Boot. Deploy MySQL ahead of time and import its data. Write the Dockerfile and Jenkinsfile shown below and push everything to GitLab.
FROM openjdk:8-jdk-alpine
MAINTAINER heber<heber@123.com>
# Install basic tools
RUN apk update && \
    apk add --no-cache iputils vim busybox-extras tzdata && \
    rm -rf /var/cache/apk/*
# Set the timezone
ENV TZ=Asia/Shanghai
# Create the application directories
RUN mkdir -p /app/static/assets
# Set the working directory to /app
WORKDIR /app
# Copy the static assets, the jar from target/, and the yml configs into the working directory
COPY ./static/assets/* /app/static/assets/
COPY ./target/*.jar /app/
COPY ./src/main/resources/*.yml /app/
# Set the JAVA_OPTS environment variable (can be overridden with docker run -e "JAVA_OPTS=...")
ENV JAVA_OPTS="-Xms256m -Xmx256m"
# Command executed when the container starts: run the Java application
#CMD ["java", "${JAVA_OPTS}", "-jar", "/app/blog-0.0.1-SNAPSHOT.jar"]
ENTRYPOINT ["sh", "-c", "java ${JAVA_OPTS} -jar /app/blog-0.0.1-SNAPSHOT.jar"]
pipeline {
agent {
node {
label 'k8s-master'
}
}
  // BRANCH_NAME: which Git branch to release.
  // NAMESPACE: which Kubernetes namespace to deploy to.
  // TAG_NAME: Docker image tag supplied by the user.
  parameters {
    gitParameter(name: 'BRANCH_NAME',branch: '',branchFilter: '.*',defaultValue: 'master',description: 'Select the branch to release',quickFilterEnabled: false,selectedValue: 'NONE', sortMode: 'NONE', tagFilter: '', type: 'PT_BRANCH')
    choice(name: 'NAMESPACE', choices: ['kube-devops', 'kube-devops-test'], description: 'Select the namespace')
    string(name: 'TAG_NAME', defaultValue: 'snapshot', description: 'Tag name; must start with v, e.g. v1, v1.0.0')
  }
  // Environment variables needed at runtime: Docker and GitLab credentials, Kubernetes config, image registry, application name, etc.
  environment {
    DOCKER_CREDENTIAL_ID = 'harbor-user-pass' // Harbor registry credential
    GIT_REPO_URL = '192.168.134.111:8085' // GitLab address
    GIT_CREDENTIAL_ID = 'gitlab-user-pass' // GitLab credential
    GIT_ACCOUNT = 'root'
    KUBECONFIG_CREDENTIAL_ID = '5f5a7c22-30e6-47e9-9920-f04594569ab8' // k8s credential
    REGISTRY = '192.168.134.111:8086' // Harbor registry address
    DOCKERHUB_NAMESPACE = 'devops' // Harbor project name
    APP_NAME = 'blog' // image name
    //SONAR_SERVER_URL = 'http://172.18.0.114:30063'
    //SONAR_CREDENTIAL_ID = 'sonarqube-token'
  }
  // Check out the selected branch
stages {
stage('checkout scm') {
steps {
checkout scmGit(branches: [[name: "$BRANCH_NAME"]], extensions: [], userRemoteConfigs: [[credentialsId: "gitlab-user-pass", url: "http://192.168.134.111:8085/devops/blog.git"]])
}
}
    // Run unit tests with Maven
stage('unit test') {
steps {
sh 'mvn clean test'
}
}
    // Clean and package the executable jar, build the Docker image, and push it to the configured registry
stage('build & push') {
steps {
sh 'mvn clean package -DskipTests'
sh 'docker build -f Dockerfile -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER .'
        // Log in to Harbor and push the image
withCredentials([usernamePassword(credentialsId: 'harbor-user-pass', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USERNAME')]) {
sh '''echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin
docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER'''
}
}
}
    // Tag the newly built image as latest and push it
stage('push latest') {
steps {
sh 'docker tag $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest'
}
}
    // When the given TAG_NAME starts with v, tag the image with it and push it to the registry
stage('push with tag') {
when {
expression {
params.TAG_NAME =~ /v.*/
}
}
steps {
//input(message: 'release image with tag?', submitter: '')
sh 'docker tag $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BUILD_NUMBER $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:$TAG_NAME'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:$TAG_NAME'
}
}
    // When the given TAG_NAME starts with v, deploy the application to production
stage('deploy to production') {
when {
expression {
params.TAG_NAME =~ /v.*/
}
}
steps {
//input(message: 'deploy to production?', submitter: '')
sh '''sed -i\'\' "s#REGISTRY#$REGISTRY#" deploy/blog-deployment.yml
sed -i\'\' "s#DOCKERHUB_NAMESPACE#$DOCKERHUB_NAMESPACE#" deploy/blog-deployment.yml
sed -i\'\' "s#APP_NAME#$APP_NAME#" deploy/blog-deployment.yml
sed -i\'\' "s#TAG_NAME#$TAG_NAME#" deploy/blog-deployment.yml
kubectl apply -f deploy/blog-deployment.yml'''
}
}
}
}
Dashboard
> New Item
> enter the project name (blog)
> choose Pipeline
> OK
> Pipeline, Definition
> choose Pipeline script from SCM
> SCM: choose Git
> Repository URL: http://192.168.134.111:8085/devops/blog.git
> select the GitLab credential created earlier
> Save
Create the directory (mkdir -p /opt/data/k8s-nfs/blog/) on the NFS server and upload the static image files there.
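Since the blog PV below points at /opt/data/k8s-nfs/blog/static/assets/, it is safer to create the full path on the NFS server up front:
# create the exact directory the PV exports
mkdir -p /opt/data/k8s-nfs/blog/static/assets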
# blog-deployment.yml
apiVersion: v1
kind: PersistentVolume # resource type: PersistentVolume
metadata:
  name: blog-pv
  namespace: kube-devops
spec:
  capacity: # capacity
    storage: 5Gi # size of this PV
  volumeMode: Filesystem # filesystem volume
  accessModes: # access modes: ReadWriteOnce, ReadWriteMany, ReadOnlyMany
    - ReadWriteOnce # can be mounted read-write by a single node
  persistentVolumeReclaimPolicy: Recycle # reclaim policy: Recycle / Delete / Retain
  storageClassName: blog # storage class name; must match the PVC
  nfs: # NFS backend
    path: /opt/data/k8s-nfs/blog/static/assets/ # export path
    server: 192.168.134.110 # NFS server address
---
apiVersion: v1
kind: PersistentVolumeClaim # resource type: PersistentVolumeClaim
metadata:
  name: blog-pvc
  namespace: kube-devops
spec:
  volumeMode: Filesystem # filesystem volume
  accessModes: # access modes: ReadWriteOnce, ReadWriteMany, ReadOnlyMany
    - ReadWriteOnce # must match the access mode of the PV
  resources: # minimum resources requested
    requests:
      storage: 3Gi # may be smaller than the PV, but not larger, or no PV will match
  storageClassName: blog # must match the storage class of the PV
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: blog-deployment
name: blog-deployment
namespace: kube-devops
spec:
progressDeadlineSeconds: 600
replicas: 1
selector:
matchLabels:
app: blog-deployment
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
labels:
app: blog-deployment
spec:
imagePullSecrets:
- name: harbor-secret
containers:
- name: blog-deployment
image: REGISTRY/DOCKERHUB_NAMESPACE/APP_NAME:TAG_NAME
readinessProbe:
httpGet:
path: /
port: 8080
timeoutSeconds: 10
failureThreshold: 30
periodSeconds: 5
imagePullPolicy: Always
ports:
- containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 300m
memory: 600Mi
requests:
cpu: 100m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- name: blog
mountPath: /app/static/assets/
volumes:
- name: blog
persistentVolumeClaim:
claimName: blog-pvc
dnsPolicy: ClusterFirst
restartPolicy: Always
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
labels:
app: blog-service
name: blog-service
namespace: kube-devops
spec:
ports:
- name: http
port: 8080
protocol: TCP
targetPort: 8080
selector:
app: blog-deployment
sessionAffinity: None
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
name: blog-service-clusterip
namespace: kube-devops
spec:
selector:
app: blog-deployment
type: ClusterIP
ports:
- protocol: TCP
port: 8080
targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: blog-ingress # custom ingress name
  namespace: kube-devops
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
    kubernetes.io/ingress.class: nginx # use the nginx ingress controller
spec:
  rules: # host rules
    - host: www.blog.com # custom domain
      http:
        paths:
          - pathType: Prefix # path type
            path: "/" # site path
            backend: # backend to route to
              service: # the Service created above
                name: blog-service-clusterip # must match the ClusterIP Service name
                port:
                  number: 8080 # Service port
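Once the pipeline has deployed everything, a minimal way to test the site through the ingress (assuming 192.168.134.110 is a node labeled ingress=true):
# point the blog domain at an ingress node and request the home page
echo "192.168.134.110 www.blog.com" >> /etc/hosts
curl -I http://www.blog.com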