k8s部署手冊-v04

語言: CN / TW / HK

一、基礎配置


1.修改主機名


1.hostnamectl set-hostname k8s-master01
2.hostnamectl set-hostname k8s-master02
3.hostnamectl set-hostname k8s-master03
4.hostnamectl set-hostname k8s-node01
5.hostnamectl set-hostname k8s-node02


2.新增 主機名與IP地址解析

cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.1.62 lookup apiserver.cluster.local
192.168.1.60 k8s-master01
192.168.1.61 k8s-master02
192.168.1.62 k8s-master03
192.168.1.63 k8s-node01
192.168.1.64 k8s-node02
EOF

3.升級伺服器核心,時間同步,關閉防火牆,重啟伺服器

#新增訪問互聯路由
cat > /etc/resolv.conf <<EOF
nameserver 114.114.114.114
nameserver 8.8.8.8
EOF

cat /etc/resolv.conf

# ssh連線Linux比較慢
#sed -i "s|#UseDNS yes|UseDNS no|" /etc/ssh/sshd_config
#sed -i "s|GSSAPIAuthentication yes|GSSAPIAuthentication no|" /etc/ssh/sshd_config


#Switch to the Aliyun (Alibaba Cloud) yum mirror

# Back up the existing repo files first. Move only *.repo so the bak
# directory itself is not swept into the move — the original
# `mv /etc/yum.repos.d/*` tried to move bak into itself and errored.
rm -rf /etc/yum.repos.d/bak && mkdir -p /etc/yum.repos.d/bak && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/

curl -o /etc/yum.repos.d/CentOS-7.repo https://mirrors.aliyun.com/repo/Centos-7.repo


yum clean all && yum makecache

cd /etc/yum.repos.d


#CentOS7使用/etc/rc.d/rc.local設定開機自動啟動
chmod +x /etc/rc.d/rc.local

#安裝依賴包

yum -y install vim net-tools lrzsz unzip gcc telnet wget sshpass ntpdate ntp curl

yum -y install conntrack ipvsadm ipset jq iptables  sysstat libseccomp git  

#時間同步
echo '*/5 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1'>/var/spool/cron/root && crontab -l



#設定防火牆為 Iptables 並設定空規則
systemctl  stop firewalld  &&  systemctl  disable firewalld

yum -y install iptables-services  &&  systemctl  start iptables  &&  systemctl  enable iptables  &&  iptables -F  &&  service iptables save


# Disable swap now and on boot (kubelet requires swap off), then turn off SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

#Kernel parameters required by Kubernetes.
#NOTE: sysctl does NOT support trailing comments on a "key=value" line —
#everything after '=' is taken as the value, so e.g. "vm.swappiness=0 # ..."
#makes `sysctl -p` fail with an invalid value. Comments must be on their
#own lines.
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
#net.ipv4.tcp_tw_recycle=0
# Never use swap; only the OOM killer may free memory
vm.swappiness=0
# Do not check whether physical memory is sufficient before overcommitting
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

# br_netfilter must be loaded before the bridge-nf-call keys exist
modprobe ip_vs_rr && modprobe br_netfilter && sysctl -p /etc/sysctl.d/kubernetes.conf


#關閉系統不需要服務
systemctl stop postfix && systemctl disable postfix

4.升級核心,重啟伺服器

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
 
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
 
yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
 
# List the grub menu entries; the newly installed kernel is entry 0
awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
# Select the new kernel by index — hard-coding the exact version string
# breaks whenever kernel-lt publishes a newer release
grub2-set-default 0
#grub2-set-default 'CentOS Linux (5.4.225-1.el7.elrepo.x86_64) 7 (Core)'
#Reboot to load the new kernel
reboot
################################

二、sealos部署k8s-v1.19

1.安裝sealos3.3


#新增訪問互聯路由
cat > /etc/resolv.conf <<EOF
nameserver 8.8.8.8
nameserver 114.114.114.114
nameserver 223.5.5.5
EOF
 
cat /etc/resolv.conf
 
#時間同步
ntpdate ntp1.aliyun.com
 
 
# Download the sealos v3.3.8 release asset. It is a plain executable,
# NOT a tarball — the original manual then ran `tar zxvf sealos*.tar.gz`,
# which could never succeed against this download.
wget -c https://github.com/fanux/sealos/releases/download/v3.3.8/sealos

chmod +x sealos && mv sealos /usr/bin/

sealos version

#Sync time
ntpdate ntp1.aliyun.com

2.離線安裝k8s 1.19

連結:https://pan.baidu.com/s/1F9sZoHBX1K1ihBP9rZSHBQ?pwd=jood 
提取碼:jood

#安裝
sealos init --passwd 1qaz@WSX \
	--master 192.168.1.60 \
	--master 192.168.1.61 \
	--master 192.168.1.62 \
	--node 192.168.1.63 \
	--node 192.168.1.64 \
	--pkg-url /root/kube1.19.16.tar.gz \
	--version v1.19.16

3.驗證叢集

kubectl get nodes
 
kubectl get pod -A
 
 
 
#配置kubectl自動補全
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> /etc/profile


#檢視汙點
kubectl describe node |grep -i taints

#去除汙點

#kubectl taint node k8s-master02 node-role.kubernetes.io/master:NoSchedule-
#kubectl taint node k8s-master03 node-role.kubernetes.io/master:NoSchedule-

4.sealos3.3常用命令

#Add node(s):
sealos join --node 192.168.1.63,192.168.1.64

 
#Add master(s) — note the flag is --master (double dash); the original
#`sealos join -master ...` would not be parsed as intended
sealos join --master 192.168.1.61,192.168.1.62
 
 
#Remove node(s):
sealos clean --node 192.168.1.63,192.168.1.64
 
 
#Remove master(s):
sealos clean --master 192.168.1.61,192.168.1.62
 

#Reset the whole cluster
sealos clean --all -f

5.安裝top命令

cat > /root/top.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --kubelet-insecure-tls
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        #這裡可以自己把metrics-server做到自己的阿里雲映象裡面,並把下面替換成自己的映象地址
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/metrics-server:v0.4.3
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
EOF



kubectl apply -f /root/top.yaml

三、部署nfs

1.服務端

#新增訪問互聯路由
cat > /etc/resolv.conf <<EOF
nameserver 114.114.114.114
nameserver 8.8.8.8
EOF

# Install on 192.168.1.60 here (plan the NFS-SERVER properly in production)
yum -y install nfs-utils
 
# Create the NFS export directory (-p: no error if it already exists)
mkdir -p /nfs_dir
# Use ':' as the owner/group separator; the 'user.group' form is deprecated
chown nobody:nobody /nfs_dir
 
# NFS-SERVER export configuration
echo '/nfs_dir *(rw,sync,no_root_squash)' > /etc/exports
 
# Restart services. nfs-utils.service is a static helper unit — only
# rpcbind and nfs-server need to be managed directly (enabling
# nfs-utils.service fails on a static unit).
systemctl restart rpcbind.service
systemctl restart nfs-server.service
 
# Start the NFS-SERVER on boot
systemctl enable rpcbind.service
systemctl enable nfs-server.service
 
# Verify the NFS export is reachable
#showmount -e 192.168.1.60
 
 

2.客戶端

#需要掛載的伺服器執行
mkdir /nfs_dir
yum install nfs-utils -y

#掛載
mount 192.168.1.60:/nfs_dir /nfs_dir

#新增開機掛載
echo "mount 192.168.1.60:/nfs_dir /nfs_dir" >> /etc/rc.local

cat /etc/rc.local

四、部署StorageClass

1.建立nfs-sc.yaml

cat > /root/nfs-sc.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
 
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
 
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system 
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
 
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner-01
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-provisioner-01
  template:
    metadata:
      labels:
        app: nfs-provisioner-01
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
#老版本外掛使用jmgao1983/nfs-client-provisioner:latest
#          image: jmgao1983/nfs-client-provisioner:latest
          image: vbouchaud/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-provisioner-01  # 此處供應者名字供storageclass呼叫
            - name: NFS_SERVER
              value: 192.168.1.60   # 填入NFS的地址
            - name: NFS_PATH
              value: /nfs_dir   # 填入NFS掛載的目錄
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.60   # 填入NFS的地址
            path: /nfs_dir   # 填入NFS掛載的目錄
 
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-boge
provisioner: nfs-provisioner-01
# Supported policies: Delete、 Retain , default is Delete
reclaimPolicy: Retain
EOF




#建立
kubectl apply -f /root/nfs-sc.yaml

#檢視
kubectl -n kube-system get pod


kubectl get sc

五、harbor倉庫搭建

1.安裝

 
#目錄/root上傳檔案docker-compose和harbor-offline-installer-v2.4.1.tgz


mv /root/docker-compose /usr/local/bin/
chmod a+x /usr/local/bin/docker-compose
 
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
 
tar -zxvf harbor-offline-installer-v2.4.1.tgz
 
mv harbor /usr/local/
 
cd /usr/local/harbor/
 
cp harbor.yml.tmpl harbor.yml
 
sed -i 's/hostname: reg.mydomain.com/hostname: 192.168.1.77/g' harbor.yml
sed -i 's/https/#https/g' harbor.yml
sed -i 's/certificate/#certificate/g' harbor.yml
sed -i 's/private_key/#private_key/g' harbor.yml

#資料庫目錄
mkdir /data
 
cat /etc/docker/daemon.json
{
    "registry-mirrors": ["https://nr240upq.mirror.aliyuncs.com", "https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn", "https://dockerhub.azk8s.cn", "http://hub-mirror.c.163.com"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m"
    },
    "insecure-registries": ["192.168.1.77:80"]
}
 
 
 
systemctl daemon-reload && systemctl restart docker
 
 
#安裝
./install.sh
 
 
## 重啟harbor
cd /usr/local/harbor/
docker-compose down -v
docker-compose up -d
docker ps|grep harbor
netstat -ntlp

2.需要訪問倉庫的其他節點的 daemon.json新增如下內容


##-------------------
vim /etc/docker/daemon.json
     "registry-mirrors": ["https://nr240upq.mirror.aliyuncs.com", "https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn", "https://dockerhub.azk8s.cn"],
    "insecure-registries": ["192.168.1.77:80"],
 
##-------------------



#重啟
systemctl daemon-reload && systemctl restart docker

3.節點使用倉庫

 
#登入倉庫網站
 
docker login -u admin -p Harbor12345 192.168.1.77:80
 
#下載映象
docker pull daocloud.io/library/nginx:1.9.1
 
#給映象打上標籤
docker tag daocloud.io/library/nginx:1.9.1 192.168.1.77:80/library/nginx:1.9.1
 
#映象上傳
docker push 192.168.1.77:80/library/nginx:1.9.1
 
#刪除映象
docker rmi 192.168.1.77:80/library/nginx:1.9.1
 
#將映象儲存為本地tar檔案,
docker save k8s.gcr.io/coredns:1.7.0  > /root/coredns-v1.7.0.tar 

 
#使用load載入tar檔案
docker load -i  /root/coredns-v1.7.0.tar


4.批量打包上傳harbor映象


cd /root
#檢視伺服器映象名稱
docker images | awk 'NR!=1{print $1":"$2}' > 01-image-old.txt && cat 01-image-old.txt

# /換成-
rm -rf  02-image-sed.txt && cp 01-image-old.txt 02-image-sed.txt && sed -i  "s|/|-|g" 02-image-sed.txt  && cat /root/02-image-sed.txt


#打標籤harbor倉庫

# Create the tagging script (heredoc with a quoted delimiter so nothing
# is expanded at creation time, consistent with the rest of this manual)
cat > /root/03-tar-image.sh <<'SCRIPT'
#!/bin/bash
# Tag every local image with the harbor registry prefix.
# 01-image-old.txt holds the original image names; 02-image-sed.txt is
# the same list with '/' replaced by '-' (one name per line, same order).
old=/root/01-image-old.txt
new=/root/02-image-sed.txt
# paste pairs line i of both files; quoting protects against odd names.
# (The original used "#!/bin/sh" with a bash-only for (( )) loop and
# unquoted variables.)
paste "$old" "$new" | while read -r a b; do
  docker tag "$a" "192.168.1.77:80/library/$b"
done
SCRIPT


#Run the tagging script
bash /root/03-tar-image.sh

docker images |grep library



#檢視打標harbor倉庫images名稱
docker images |grep 192.168.1.77 | awk '{print $1":"$2}'  > 04-tar-image.txt && cat 04-tar-image.txt

#上傳到harbor倉庫
for h in `cat 04-tar-image.txt`; do docker push $h; done



#刪除打標映象
for d in `cat 04-tar-image.txt`; do docker rmi $d; done
docker images |grep library

#刪除建立的檔案
rm -rf /root/0*txt  03-tar-image.sh

六、kuboard介面管理

1.下載地址

curl -o kuboard-v3.yaml https://addons.kuboard.cn/kuboard/kuboard-v3-storage-class.yaml

2.編輯yaml

#編輯 kuboard-v3.yaml 檔案中的配置,該部署檔案中,有1處配置必須修改:storageClassName


  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      # 請填寫一個有效的 StorageClass name
      storageClassName: nfs-boge
      accessModes: [ "ReadWriteMany" ]
      resources:
        requests:
          storage: 5Gi

3.執行

kubectl create -f kuboard-v3.yaml

kubectl get pod -n kuboard

############################################

#訪問
http://192.168.1.60:30080/
輸入初始使用者名稱和密碼,並登入
    使用者名稱: admin
    密碼: Kuboard123
#############################################	

#檢視錯誤
journalctl -f -u kubelet.service

七、helm3安裝

1.helm包下載地址

 wget https://get.helm.sh/helm-v3.6.1-linux-amd64.tar.gz

2.安裝helm

#解壓 && 移動到 /usr/bin 目錄下:

tar -xvf helm-v3.6.1-linux-amd64.tar.gz && cd linux-amd64/ && mv helm /usr/bin 


#檢視版本
helm version

3.配置倉庫

#新增公用的倉庫
helm repo add incubator https://charts.helm.sh/incubator
helm repo add bitnami https://charts.bitnami.com/bitnami
# 配置helm微軟源地址
helm repo add stable http://mirror.azure.cn/kubernetes/charts
# 配置helm阿里源地址
helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts

helm repo add stable   https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo add google  https://kubernetes-charts.storage.googleapis.com
helm repo add jetstack https://charts.jetstack.io

# 檢視倉庫
helm repo list
# 更新倉庫
helm repo update  

# 刪除倉庫
#helm repo remove  aliyun


# helm list

八、haproxy+keepalived+ingress

1.部署阿里雲ingress

mkdir -p /data/k8s/

cd /data/k8s/
 
cat > /data/k8s/aliyun-ingress-nginx.yaml <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-controller
  labels:
    app: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-controller
  labels:
    app: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-controller
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-controller
    namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ingress-nginx
  name: nginx-ingress-lb
  namespace: ingress-nginx
spec:
  # DaemonSet need:
  # ----------------
  type: ClusterIP
  # ----------------
  # Deployment need:
  # ----------------
#  type: NodePort
  # ----------------
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    protocol: TCP
  - name: metrics
    port: 10254
    protocol: TCP
    targetPort: 10254
  selector:
    app: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
data:
  keep-alive: "75"
  keep-alive-requests: "100"
  upstream-keepalive-connections: "10000"
  upstream-keepalive-requests: "100"
  upstream-keepalive-timeout: "60"
  allow-backend-server-header: "true"
  enable-underscores-in-headers: "true"
  generate-request-id: "true"
  http-redirect-code: "301"
  ignore-invalid-headers: "true"
  log-format-upstream: '{"@timestamp": "$time_iso8601","remote_addr": "$remote_addr","x-forward-for": "$proxy_add_x_forwarded_for","request_id": "$req_id","remote_user": "$remote_user","bytes_sent": $bytes_sent,"request_time": $request_time,"status": $status,"vhost": "$host","request_proto": "$server_protocol","path": "$uri","request_query": "$args","request_length": $request_length,"duration": $request_time,"method": "$request_method","http_referrer": "$http_referer","http_user_agent":  "$http_user_agent","upstream-sever":"$proxy_upstream_name","proxy_alternative_upstream_name":"$proxy_alternative_upstream_name","upstream_addr":"$upstream_addr","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status}'
  max-worker-connections: "65536"
  worker-processes: "2"
  proxy-body-size: 20m
  proxy-connect-timeout: "10"
  proxy_next_upstream: error timeout http_502
  reuse-port: "true"
  server-tokens: "false"
  ssl-ciphers: ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
  ssl-protocols: TLSv1 TLSv1.1 TLSv1.2
  ssl-redirect: "false"
  worker-cpu-affinity: auto
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app: ingress-nginx
  annotations:
    component.version: "v0.30.0"
    component.revision: "v1"
spec:
  # Deployment need:
  # ----------------
#  replicas: 1
  # ----------------
  selector:
    matchLabels:
      app: ingress-nginx
  template:
    metadata:
      labels:
        app: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      # DaemonSet need:
      # ----------------
      hostNetwork: true
      # ----------------
      serviceAccountName: nginx-ingress-controller
      priorityClassName: system-node-critical
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - ingress-nginx
              topologyKey: kubernetes.io/hostname
            weight: 100
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: type
                operator: NotIn
                values:
                - virtual-kubelet
      containers:
        - name: nginx-ingress-controller
          image: registry.cn-beijing.aliyuncs.com/acs/aliyun-ingress-controller:v0.30.0.2-9597b3685-aliyun
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/nginx-ingress-lb
            - --annotations-prefix=nginx.ingress.kubernetes.io
            - --enable-dynamic-certificates=true
            - --v=2
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
#          resources:
#            limits:
#              cpu: "1"
#              memory: 2Gi
#            requests:
#              cpu: "1"
#              memory: 2Gi
          volumeMounts:
          - mountPath: /etc/localtime
            name: localtime
            readOnly: true
      volumes:
      - name: localtime
        hostPath:
          path: /etc/localtime
          type: File
      nodeSelector:
        boge/ingress-controller-ready: "true"
      tolerations:
      - operator: Exists
      initContainers:
      - command:
        - /bin/sh
        - -c
        - |
          mount -o remount rw /proc/sys
          sysctl -w net.core.somaxconn=65535
          sysctl -w net.ipv4.ip_local_port_range="1024 65535"
          sysctl -w fs.file-max=1048576
          sysctl -w fs.inotify.max_user_instances=16384
          sysctl -w fs.inotify.max_user_watches=524288
          sysctl -w fs.inotify.max_queued_events=16384
        image: registry.cn-beijing.aliyuncs.com/acs/busybox:v1.29.2
        imagePullPolicy: Always
        name: init-sysctl
        securityContext:
          privileged: true
          procMount: Default
---
## Deployment need for aliyun'k8s:
#apiVersion: v1
#kind: Service
#metadata:
#  annotations:
#    service.beta.kubernetes.io/alibaba-cloud-loadbalancer-id: "lb-xxxxxxxxxxxxxxxxxxx"
#    service.beta.kubernetes.io/alibaba-cloud-loadbalancer-force-override-listeners: "true"
#  labels:
#    app: nginx-ingress-lb
#  name: nginx-ingress-lb-local
#  namespace: ingress-nginx
#spec:
#  externalTrafficPolicy: Local
#  ports:
#  - name: http
#    port: 80
#    protocol: TCP
#    targetPort: 80
#  - name: https
#    port: 443
#    protocol: TCP
#    targetPort: 443
#  selector:
#    app: ingress-nginx
#  type: LoadBalancer
EOF

 

kubectl  apply -f /data/k8s/aliyun-ingress-nginx.yaml

2.節點打標籤

#允許節點打標籤
kubectl label node k8s-master01  boge/ingress-controller-ready=true
kubectl label node k8s-master02  boge/ingress-controller-ready=true
kubectl label node k8s-master03  boge/ingress-controller-ready=true

#刪除標籤
#kubectl label node k8s-master01  boge/ingress-controller-ready=true --overwrite
#kubectl label node k8s-master02  boge/ingress-controller-ready=true --overwrite
#kubectl label node k8s-master03  boge/ingress-controller-ready=true --overwrite

3.haproxy+keepalived部署

3.0 部署

yum install haproxy keepalived -y

#重啟程式
systemctl restart haproxy.service
systemctl restart keepalived.service


# 檢視執行狀態
systemctl status haproxy.service 
systemctl status keepalived.service

#開機自啟動
systemctl  enable keepalived.service
systemctl  enable haproxy.service 

3.1 修改配置haproxy

vim /etc/haproxy/haproxy.cfg
###################################################
listen ingress-http
        bind 0.0.0.0:80
        mode tcp
        option tcplog
        option dontlognull
        option dontlog-normal
        balance roundrobin
        server 192.168.1.60 192.168.1.60:80 check inter 2000 fall 2 rise 2 weight 1
        server 192.168.1.61 192.168.1.61:80 check inter 2000 fall 2 rise 2 weight 1
        server 192.168.1.62 192.168.1.62:80 check inter 2000 fall 2 rise 2 weight 1
	
 
listen ingress-https
        bind 0.0.0.0:443
        mode tcp
        option tcplog
        option dontlognull
        option dontlog-normal
        balance roundrobin
        server 192.168.1.60 192.168.1.60:443 check inter 2000 fall 2 rise 2 weight 1
        server 192.168.1.61 192.168.1.61:443 check inter 2000 fall 2 rise 2 weight 1
        server 192.168.1.62 192.168.1.62:443 check inter 2000 fall 2 rise 2 weight 1

3.2 A機器修改keepalived配置

cat > /etc/keepalived/keepalived.conf <<EOF
global_defs {
    router_id lb-master
}
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -60
}
vrrp_instance VI-kube-master {
    state MASTER
    priority 120
    unicast_src_ip 192.168.1.63	  	#本機ip
    unicast_peer {
        192.168.1.64  				#另一臺機器ip
    }
    dont_track_primary
    interface ens33  				# 注意這裡的網絡卡名稱修改成你機器真實的內網網絡卡名稱,可用命令ip addr檢視
    virtual_router_id 111
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.1.100 				#vip  地址
    }
}
EOF

3.3 B機器修改keepalived配置

cat > /etc/keepalived/keepalived.conf <<EOF
global_defs {
    router_id lb-master
}
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -60
}
vrrp_instance VI-kube-master {
    state MASTER
    priority 120
    unicast_src_ip 192.168.1.64	  	#本機ip
    unicast_peer {
        192.168.1.63  				#另一臺機器ip
    }
    dont_track_primary
    interface ens33  				# 注意這裡的網絡卡名稱修改成你機器真實的內網網絡卡名稱,可用命令ip addr檢視
    virtual_router_id 111
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.1.100 				#vip  地址
    }
}
EOF

3.4 重啟


#重啟程式
systemctl restart haproxy.service
systemctl restart keepalived.service


# 檢視執行狀態
systemctl status haproxy.service 
systemctl status keepalived.service

4.部署nginx-ingress

cat > /root/nginx-ingress.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  namespace: test
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: test
  name: nginx
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  namespace: test
  name: nginx-ingress
spec:
  rules:
    - host: nginx.boge.com
      http:
        paths:
          - backend:
              serviceName: nginx
              servicePort: 80
            path: /
EOF


5.測試nginx-ingress

  
kubectl apply -f /root/nginx-ingress.yaml
#檢視建立的ingress資源
kubectl get ingress -A





#伺服器新增域名解析
echo "192.168.1.100 nginx.boge.com" >> /etc/hosts


# 我們在其它節點上,加下本地hosts,來測試下效果
192.168.1.100 nginx.boge.com



#測試
curl nginx.boge.com  

九、elk日誌監控

1.建立測試tomcat

# Test workload for the ELK/log-pilot pipeline: a Tomcat Deployment whose logs
# are picked up by log-pilot. Per the inline notes, each aliyun_logs_<index>
# env var declares one log source: the value "stdout" collects container
# stdout, and a file glob collects files from the shared emptyDir volume.
cat > 01-tomcat-test.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: tomcat
  name: tomcat
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tomcat
  template:
    metadata:
      labels:
        app: tomcat
    spec:
      tolerations:
      - key: "node-role.kubernetes.io/master"
        effect: "NoSchedule"
      containers:
      - name: tomcat
        image: "tomcat:7.0"
        env:      # 注意點一,新增相應的環境變數(下面收集了兩塊日誌1、stdout 2、/usr/local/tomcat/logs/catalina.*.log)
        - name: aliyun_logs_tomcat-syslog   # 如日誌傳送到es,那index名稱為 tomcat-syslog
          value: "stdout"
        - name: aliyun_logs_tomcat-access   # 如日誌傳送到es,那index名稱為 tomcat-access
          value: "/usr/local/tomcat/logs/catalina.*.log"
        volumeMounts:   # 注意點二,對pod內要收集的業務日誌目錄需要進行共享,可以收集多個目錄下的日誌檔案
          - name: tomcat-log
            mountPath: /usr/local/tomcat/logs
      volumes:
        - name: tomcat-log
          emptyDir: {}
EOF



kubectl apply -f 01-tomcat-test.yaml


2.部署elasticsearch

# Write the Elasticsearch StatefulSet + Service manifest.
# FIX: the original heredoc was never terminated — without the closing EOF the
# shell swallows every following command (including "kubectl apply") into the
# generated file. The terminator is added at the end of the document body.
cat > 02-elasticsearch.6.8.13-statefulset.yaml <<EOF
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: elasticsearch-logging
    version: v6.8.13
  name: elasticsearch-logging
  namespace: logging
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: elasticsearch-logging
      version: v6.8.13
  serviceName: elasticsearch-logging
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v6.8.13
    spec:
#      nodeSelector:
#        esnode: "true"  ## 注意給想要執行到的node打上相應labels
      containers:
      - env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: cluster.name
          value: elasticsearch-logging-0
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        image: elastic/elasticsearch:6.8.13
        name: elasticsearch-logging
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - mountPath: /usr/share/elasticsearch/data
          name: elasticsearch-logging
      dnsConfig:
        options:
        - name: single-request-reopen
      initContainers:
      - command:
        - /bin/sysctl
        - -w
        - vm.max_map_count=262144
        image: busybox
        imagePullPolicy: IfNotPresent
        name: elasticsearch-logging-init
        resources: {}
        securityContext:
          privileged: true
      - name: fix-permissions
        image: busybox
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /usr/share/elasticsearch/data
      volumes:
      - name: elasticsearch-logging
        hostPath:
          path: /esdata
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: elasticsearch-logging
  name: elasticsearch
  namespace: logging
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: elasticsearch-logging
  type: ClusterIP
EOF
kubectl apply -f 02-elasticsearch.6.8.13-statefulset.yaml

3.部署kibana

# Write the Kibana Deployment + Service + Ingress manifest.
# FIX: the original heredoc was missing its closing EOF terminator, so the
# shell would swallow the subsequent "kubectl apply" command into the file.
# NOTE(review): the Ingress uses extensions/v1beta1, removed in k8s v1.22 —
# confirm cluster version or migrate to networking.k8s.io/v1.
cat > 03-kibana.6.8.13.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: logging
  labels:
    app: kibana
spec:
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: elastic/kibana:6.8.13
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
          - name: ELASTICSEARCH_URL
            value: http://elasticsearch:9200
        ports:
        - containerPort: 5601
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: logging
  labels:
    app: kibana
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  type: ClusterIP
  selector:
    app: kibana
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: logging
spec:
  rules:
  - host: kibana.boge.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 5601
EOF


kubectl apply -f 03-kibana.6.8.13.yaml



4.部署log-pilot

# Write the log-pilot DaemonSet manifest (ships container logs to ES).
# FIX: the original heredoc was missing its closing EOF terminator, so the
# shell would swallow the subsequent "kubectl apply" command into the file.
cat > 04-log-pilot.yml <<EOF
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-pilot
  namespace: logging
  labels:
    app: log-pilot
  # 設定期望部署的namespace

spec:
  selector:
    matchLabels:
      app: log-pilot
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: log-pilot
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # 是否允許部署到Master節點上
      #tolerations:
      #- key: node-role.kubernetes.io/master
      #  effect: NoSchedule
      containers:
      - name: log-pilot
        # 版本請參考https://github.com/AliyunContainerService/log-pilot/releases
        image: registry.cn-hangzhou.aliyuncs.com/acs/log-pilot:0.9.7-filebeat
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 200m
            memory: 200Mi
        env:
          - name: "NODE_NAME"
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          ##--------------------------------
#          - name: "LOGGING_OUTPUT"
#            value: "logstash"
#          - name: "LOGSTASH_HOST"
#            value: "logstash-g1"
#          - name: "LOGSTASH_PORT"
#            value: "5044"
          ##--------------------------------
          - name: "LOGGING_OUTPUT"
            value: "elasticsearch"
          ## 請確保叢集到ES網路可達
          - name: "ELASTICSEARCH_HOSTS"
            value: "elasticsearch:9200"
          ## 配置ES訪問許可權
          #- name: "ELASTICSEARCH_USER"
          #  value: "{es_username}"
          #- name: "ELASTICSEARCH_PASSWORD"
          #  value: "{es_password}"
          ##--------------------------------
          ## https://github.com/AliyunContainerService/log-pilot/blob/master/docs/filebeat/docs.md
          ## to file need configure 1
#          - name: LOGGING_OUTPUT
#            value: file
#          - name: FILE_PATH
#            value: /tmp
#          - name: FILE_NAME
#            value: filebeat.log
        volumeMounts:
        - name: sock
          mountPath: /var/run/docker.sock
        - name: root
          mountPath: /host
          readOnly: true
        - name: varlib
          mountPath: /var/lib/filebeat
        - name: varlog
          mountPath: /var/log/filebeat
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
         ## to file need configure 2
#        - mountPath: /tmp
#          name: mylog
        livenessProbe:
          failureThreshold: 3
          exec:
            command:
            - /pilot/healthz
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 2
        securityContext:
          capabilities:
            add:
            - SYS_ADMIN
      terminationGracePeriodSeconds: 30
      volumes:
      - name: sock
        hostPath:
          path: /var/run/docker.sock
      - name: root
        hostPath:
          path: /
      - name: varlib
        hostPath:
          path: /var/lib/filebeat
          type: DirectoryOrCreate
      - name: varlog
        hostPath:
          path: /var/log/filebeat
          type: DirectoryOrCreate
      - name: localtime
        hostPath:
          path: /etc/localtime
       ## to file need configure 3
#      - hostPath:
#          path: /tmp/mylog
#          type: ""
#        name: mylog
EOF

kubectl apply -f 04-log-pilot.yml


5.配置kibana頁面

Managenment>index Patterns>Create index pattern

#建立日誌
Create index pattern> index pattern(tomcat-access*)>Next step

#建立時間
Time Filter field name(@timestamp)>Create index pattern

#檢視日誌展示
Discover>tomcat-access*
————————————————
版權宣告:本文為CSDN博主「大蝦別跑」的原創文章,遵循CC 4.0 BY-SA版權協議,轉載請附上原文出處連結及本宣告。
原文連結:https://blog.csdn.net/qq_35583325/article/details/128172276

十、Prometheus監控

1.匯入離線包

連結:https://pan.baidu.com/s/1DyMJPT8r_TUpI8Dr31SVew?pwd=m1bk 
提取碼:m1bk


#匯入上傳tar包
sudo docker load -i alertmanager-v0.21.0.tar
sudo docker load -i grafana-7.3.4.tar
sudo docker load -i k8s-prometheus-adapter-v0.8.2.tar
sudo docker load -i kube-rbac-proxy-v0.8.0.tar
sudo docker load -i kube-state-metrics-v1.9.7.tar
sudo docker load -i node-exporter-v1.0.1.tar
sudo docker load -i prometheus-config-reloader-v0.43.2.tar
sudo docker load -i prometheus_demo_service.tar
sudo docker load -i prometheus-operator-v0.43.2.tar
sudo docker load -i prometheus-v2.22.1.tar

2.主節點建立


#解壓下載的程式碼包
sudo unzip kube-prometheus-master.zip
sudo rm -f kube-prometheus-master.zip && cd kube-prometheus-master


#這裡建議先看下有哪些映象,便於在下載映象快的節點上先收集好所有需要的離線docker映象
find ./ -type f |xargs grep 'image: '|sort|uniq|awk '{print $3}'|grep ^[a-zA-Z]|grep -Evw 'error|kubeRbacProxy'|sort -rn|uniq


kubectl create -f manifests/setup
kubectl create -f manifests/



#過一會檢視建立結果:
kubectl -n monitoring get all
 
 
 
# 附:清空上面部署的prometheus所有服務:
# kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup

3. 訪問下prometheus的UI

# 修改下prometheus UI的service模式,便於我們訪問
# kubectl -n monitoring patch svc prometheus-k8s -p '{"spec":{"type":"NodePort"}}'
service/prometheus-k8s patched
 
# kubectl -n monitoring get svc prometheus-k8s 
NAME             TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
prometheus-k8s   NodePort   10.68.23.79   <none>        9090:22129/TCP   7m43s
 

3.1 修改使用者許可權

#   kubectl edit clusterrole prometheus-k8s
#------ 原始的rules -------
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
#---------------------------

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus-k8s
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  - services
  - endpoints
  - pods
  - nodes/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
 

4. 監控ingress-nginx

cat > servicemonitor.yaml <<EOF
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    app: ingress-nginx
  name: nginx-ingress-scraping
  namespace: ingress-nginx
spec:
  endpoints:
  - interval: 30s
    path: /metrics
    port: metrics
  jobLabel: app
  namespaceSelector:
    matchNames:
    - ingress-nginx
  selector:
    matchLabels:
      app: ingress-nginx
EOF


kubectl apply -f servicemonitor.yaml


kubectl -n ingress-nginx get servicemonitors.monitoring.coreos.com

十一、安裝kubesphere3.3

官網參考文件

https://kubesphere.com.cn/docs/v3.3/pluggable-components/alerting/

1.部署kubesphere時需要預設 StorageClass

kubectl edit sc nfs-boge

  metadata:
    annotations:
      storageclass.beta.kubernetes.io/is-default-class: "true"

2.下載yaml

wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/kubesphere-installer.yaml

wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/cluster-configuration.yaml




#修改cluster-configuration.yaml
#將ectd下的 endpointIps改為你的master節點的私有IP地址。
#endpointIps: XX.X.X.X

3.執行yaml

kubectl apply -f kubesphere-installer.yaml

kubectl apply -f cluster-configuration.yaml

4. 檢視日誌

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f


#訪問任意機器的 30880埠
#賬號 : admin
#密碼 : P@88w0rd

5. 解決etcd監控證書找不到問題

kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs  \
--from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt  \
--from-file=etcd-client.crt=/etc/kubernetes/pki/etcd/healthcheck-client.crt  \
--from-file=etcd-client.key=/etc/kubernetes/pki/etcd/healthcheck-client.key

6. 在安裝後啟用告警系統

在 cluster-configuration.yaml 檔案中,搜尋 alerting,將 enabled 的 false 更改為 true 以啟用告警系統。完成後儲存檔案

alerting:
  enabled: true # 將“false”更改為“true”。


#執行
kubectl apply -f kubesphere-installer.yaml

kubectl apply -f cluster-configuration.yaml

6.0 配置釘釘報警

6.1 釘釘自定義機器配置

新增自定義機器人,安全配置,勾選** 加簽**

6.2 操作步驟

左上角>平臺管理>平臺設定>通知管理>通知配置>釘釘>群機器人配置
開啟-已啟用
填寫自己的 Webhook URL
填寫自己的 金鑰 (加簽
傳送測試資訊
確定

檢視釘釘群訊息,確認測試訊息是否傳送成功。

7. 在安裝後啟用應用商店,

在該 YAML 檔案中,搜尋 openpitrix,將 enabled 的 false 改為 true。完成後,點選右下角的確定,儲存配置。

openpitrix:
  store:
    enabled: true # 將“false”更改為“true”。


#執行
kubectl apply -f kubesphere-installer.yaml

kubectl apply -f cluster-configuration.yaml

8.在安裝後啟用服務網格 istio

在該配置檔案中,搜尋 servicemesh,並將 enabled 的 false 改為 true。完成後,點選右下角的確定,儲存配置

servicemesh:
enabled: true # 將“false”更改為“true”。
istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/
  components:
    ingressGateways:
    - name: istio-ingressgateway # 將服務暴露至服務網格之外。預設不開啟。
      enabled: false
    cni:
      enabled: false # 啟用後,會在 Kubernetes pod 生命週期的網路設定階段完成 Istio 網格的 pod 流量轉發設定工作。

9.在安裝前啟用 DevOps

在該 YAML 檔案中,搜尋 devops,將 enabled 的 false 改為 true。完成後,點選右下角的確定,儲存配置。

devops:
  enabled: true # 將“false”更改為“true”。

10. 解除安裝方法


kubectl delete -f cluster-configuration.yaml --force
kubectl delete -f kubesphere-installer.yaml --force

#刪除殘餘檔案
vi del.sh

#!/usr/bin/env bash
#
# del.sh — tear down a KubeSphere installation: helm releases, kubefed
# resources, monitoring objects, PVCs, RBAC objects, finalizers, CRDs and
# namespaces. Deletions are deliberately best-effort: stderr is suppressed
# with 2>/dev/null so the script keeps going when an object no longer exists.
 
# Prompt the operator for confirmation; loops until "yes" or "no" is typed,
# and exits the whole script on "no".
function delete_sure(){
  cat << eof
$(echo -e "\033[1;36mNote:\033[0m")
Delete the KubeSphere cluster, including the module kubesphere-system kubesphere-devops-system kubesphere-devops-worker kubesphere-monitoring-system kubesphere-logging-system openpitrix-system.
eof
 
read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
    read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
done
 
if [[ "x"$ans == "xno" ]]; then
    exit
fi
}
 
 
delete_sure
 
# delete ks-installer
kubectl delete deploy ks-installer -n kubesphere-system 2>/dev/null
 
# delete helm
# xargs -r skips the uninstall entirely when a namespace has no releases.
for namespaces in kubesphere-system kubesphere-devops-system kubesphere-monitoring-system kubesphere-logging-system openpitrix-system kubesphere-monitoring-federated
do
  helm list -n $namespaces | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n $namespaces 2>/dev/null
done
 
# delete kubefed
# Only runs the kubefed cleanup when multicluster is enabled in ks-installer.
kubectl get cc -n kubesphere-system ks-installer -o jsonpath="{.status.multicluster}" | grep enable
if [[ $? -eq 0 ]]; then
  # delete kubefed types resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=types.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=types.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # delete kubefed core resouces
  for kubefed in `kubectl api-resources --namespaced=true --api-group=core.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=core.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # uninstall kubefed chart
  helm uninstall -n kube-federation-system kubefed 2>/dev/null
fi
 
 
helm uninstall -n kube-system snapshot-controller 2>/dev/null
 
# delete kubesphere deployment & statefulset
kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
kubectl delete statefulset -n kubesphere-system `kubectl get statefulset -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
 
# delete monitor resources
kubectl delete prometheus -n kubesphere-monitoring-system k8s 2>/dev/null
kubectl delete Alertmanager -n kubesphere-monitoring-system main 2>/dev/null
kubectl delete DaemonSet -n kubesphere-monitoring-system node-exporter 2>/dev/null
kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
 
# delete grafana
# The custom-columns output emits "<namespace> <name>" pairs; xargs -n2 feeds
# them as the two trailing args of "kubectl delete pvc -n <ns> <name>".
kubectl delete deployment -n kubesphere-monitoring-system grafana 2>/dev/null
kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n 2>/dev/null
 
# delete pvc
pvcs="kubesphere-system|openpitrix-system|kubesphere-devops-system|kubesphere-logging-system"
kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n 2>/dev/null
 
 
# delete rolebindings
# $1 = namespace; removes rolebindings labelled with kubesphere's user-ref.
delete_role_bindings() {
  for rolebinding in `kubectl -n $1 get rolebindings -l iam.kubesphere.io/user-ref -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete rolebinding $rolebinding 2>/dev/null
  done
}
 
# delete roles
# $1 = namespace; removes the three built-in roles plus role-template roles.
delete_roles() {
  kubectl -n $1 delete role admin 2>/dev/null
  kubectl -n $1 delete role operator 2>/dev/null
  kubectl -n $1 delete role viewer 2>/dev/null
  for role in `kubectl -n $1 get roles -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete role $role 2>/dev/null
  done
}
 
# remove useless labels and finalizers
# The trailing '-' on the label key removes the label; clearing finalizers
# lets namespaces stuck in Terminating state actually go away.
for ns in `kubectl get ns -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl label ns $ns kubesphere.io/workspace-
  kubectl label ns $ns kubesphere.io/namespace-
  kubectl patch ns $ns -p '{"metadata":{"finalizers":null,"ownerReferences":null}}'
  delete_role_bindings $ns
  delete_roles $ns
done
 
# delete clusterroles
delete_cluster_roles() {
  for role in `kubectl get clusterrole -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
 
  for role in `kubectl get clusterroles | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
}
delete_cluster_roles
 
# delete clusterrolebindings
delete_cluster_role_bindings() {
  for rolebinding in `kubectl get clusterrolebindings -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
 
  for rolebinding in `kubectl get clusterrolebindings | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
}
delete_cluster_role_bindings
 
# delete clusters
# Finalizers must be cleared first or the delete hangs on the CR finalizer.
for cluster in `kubectl get clusters -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch cluster $cluster -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete clusters --all 2>/dev/null
 
# delete workspaces
for ws in `kubectl get workspaces -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspace $ws -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspaces --all 2>/dev/null
 
# make DevOps CRs deletable
for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do
    for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do
        for devops_res in $(kubectl get $devops_crd -n $ns -oname); do
            kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge
        done
    done
done
 
# delete validatingwebhookconfigurations
for webhook in ks-events-admission-validate users.iam.kubesphere.io network.kubesphere.io validating-webhook-configuration resourcesquotas.quota.kubesphere.io
do
  kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done
 
# delete mutatingwebhookconfigurations
for webhook in ks-events-admission-mutate logsidecar-injector-admission-mutate mutating-webhook-configuration
do
  kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done
 
# delete users
for user in `kubectl get users -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch user $user -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete users --all 2>/dev/null
 
 
# delete helm resources
for resource_type in `echo helmcategories helmapplications helmapplicationversions helmrepos helmreleases`; do
  for resource_name in `kubectl get ${resource_type}.application.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`; do
    kubectl patch ${resource_type}.application.kubesphere.io ${resource_name} -p '{"metadata":{"finalizers":null}}' --type=merge
  done
  kubectl delete ${resource_type}.application.kubesphere.io --all 2>/dev/null
done
 
# delete workspacetemplates
for workspacetemplate in `kubectl get workspacetemplates.tenant.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspacetemplates.tenant.kubesphere.io $workspacetemplate -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspacetemplates.tenant.kubesphere.io --all 2>/dev/null
 
# delete federatednamespaces in namespace kubesphere-monitoring-federated
for resource in $(kubectl get federatednamespaces.types.kubefed.io -n kubesphere-monitoring-federated -oname); do
  kubectl patch "${resource}" -p '{"metadata":{"finalizers":null}}' --type=merge -n kubesphere-monitoring-federated
done
 
# delete crds
for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
do
  if [[ $crd == *kubesphere.io ]] || [[ $crd == *kubefed.io ]] ; then kubectl delete crd $crd 2>/dev/null; fi
done
 
# delete relevance ns
for ns in kube-federation-system kubesphere-alerting-system kubesphere-controls-system kubesphere-devops-system kubesphere-devops-worker kubesphere-logging-system kubesphere-monitoring-system kubesphere-monitoring-federated openpitrix-system kubesphere-system
do
  kubectl delete ns $ns 2>/dev/null
done

#執行刪除
sh del.sh

十二、 GitLab安裝

1. 單獨準備伺服器,採用Docker安裝

docker search gitlab
docker pull gitlab/gitlab-ce

2.準備docker-compose.yml檔案


mkdir -p /data/git

vim /data/git/docker-compose.yml

version: '3.1'
services:
  gitlab:
    image: 'gitlab/gitlab-ce:latest'
    container_name: gitlab
    restart: always
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://10.1.100.225:8929'#自己安裝git的伺服器IP
        gitlab_rails['gitlab_shell_ssh_port'] = 2224
    ports:
      - '8929:8929'
      - '2224:2224'
    volumes:
      - './config:/etc/gitlab'
      - './logs:/var/log/gitlab'
      - './data:/var/opt/gitlab'


3.啟動容器(需要稍等很久……)

cd /data/git
docker-compose up -d

4.訪問GitLab首頁

http://10.1.100.225:8929

5.檢視root使用者初始密碼

docker exec -it gitlab cat /etc/gitlab/initial_root_password

6.第一次登入網頁,需要修改密碼 Password

**右上角>>**Administrator>Preferences>Password

十三、DevOps初始化環境

1.linux系統 安裝Jenkins、jdk 、maven

1.下載地址

JDK包下載地址
https://www.oracle.com/java/technologies/downloads/

MAven下載地址
https://maven.apache.org/download.cgi

2.安裝jdk maven

tar -zxvf jdk-8*.tar.gz -C /usr/local/
tar -zxvf apache-maven-*.tar.gz -C /usr/local/

cd /usr/local
mv apache-maven*/ maven
mv jdk1.8*/ jdk

2.1 編輯maven配置

vim /usr/local/maven/conf/settings.xml

 
<!--#maven配置阿里雲倉庫,在160行插入-->
<mirror>
    <id>nexus-aliyun</id>
    <mirrorOf>central</mirrorOf>
    <name>Nexus aliyun</name>
    <url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
 
 
<!--#maven配置jdk,在252行插入-->
<profile>    
     <id>jdk1.8</id>    
     <activation>    
         <activeByDefault>true</activeByDefault>    
         <jdk>1.8</jdk>    
    </activation>    
    <properties>    
    	<maven.compiler.source>1.8</maven.compiler.source>    
    	<maven.compiler.target>1.8</maven.compiler.target>    
        <maven.compiler.compilerVersion>1.8</maven.compiler.compilerVersion>    
    </properties>     
</profile> 
 
<!--#maven配置jdk,在257行插入-->
   <activeProfiles>
      <activeProfile>jdk1.8</activeProfile>
   </activeProfiles>
 

3.安裝jenkins

3.1 下載

docker pull jenkins/jenkins:2.319.1-lts

3.2 建立yaml

mkdir -p /data/jenkins/
cd /data/jenkins/
vim /data/jenkins/docker-compose.yml

version: "3.1"
services:
  jenkins:
    image: jenkins/jenkins
    container_name: jenkins
    ports:
      - 8080:8080
      - 50000:50000
    volumes:
      - ./data/:/var/jenkins_home/
      - /var/run/docker.sock:/var/run/docker.sock
      - /usr/bin/docker:/usr/bin/docker
      - /etc/docker/daemon.json:/etc/docker/daemon.json

3.3 啟動jenkins

 
#修改Jenkins使用者許可權
cd /var/run
 
chown root:root docker.sock
 
#其他使用者有讀和寫許可權
chmod o+rw docker.sock
 
 
cd /data/jenkins/
docker-compose up -d
 
#授權
chmod 777 /data/jenkins/data/
 
cat /data/jenkins/data/hudson.model.UpdateCenter.xml
#重新啟動Jenkins容器後,由於Jenkins需要下載大量內容,但是由於預設下載地址下載速度較慢,
#需要重新設定下載地址為國內映象站# 清華大學的外掛源也可以
# 修改資料卷中的hudson.model.UpdateCenter.xml檔案
# 將下載地址替換為http://mirror.esuni.jp/jenkins/updates/update-center.json
 
# 清華大學的外掛源也可以
#https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json
 
#重啟
docker-compose restart
 
#檢視日誌
docker logs -f jenkins

3.4 訪問頁面,安裝外掛

http://10.1.100.225:8080
1.輸入密碼2.選擇外掛來安裝3.點選安裝

4.jenkins外掛安裝

中文介面>系統管理>外掛管理>可選外掛>搜尋外掛
英文介面> Manage Jenkins–Manage Plugins-Available>搜尋外掛
Locale
Localization
Git Parameter
Publish Over SSH

5. 配置jenkins

mv /usr/local/maven/ /data/jenkins/data/
mv /usr/local/jdk/ /data/jenkins/data/

5.1 載入本地jdk

Dashboard>系統管理>全域性工具配置>Add JDK>去掉對鉤 (√)自動安裝
NAME

jdk8

JAVA_HOME

/var/jenkins_home/jdk/

5.1 載入本地maven

Dashboard>系統管理>全域性工具配置>Add Maven>去掉對鉤 (√)自動安裝
NAME

maven

JAVA_HOME

/var/jenkins_home/maven/

Save Apply
儲存 應用

執行mvn測試
mvn help:system

3.jenkins拉取測試

系統管理>系統配置>Publish over SSH>SSH Servers>Add

#自定義專案名稱
name

test

#主機IP
Hostname

10.1.100.25

#主機使用者名稱
Username

root

#拉取專案路徑
Remote Directory

/data/work/mytest

點選高階
√ Use password authentication, or use a different key

#輸入伺服器密碼
Passphrase / Password

xxxx

#點選 測試

Test Configuration


Save Apply
儲存 應用

4.Jenkins伺服器設定免密登入k8s-mast伺服器

#Jenkins伺服器-進入jenkins容器
docker exec -it jenkins bash

#進入jenkins容器-生成免密登入公私鑰,根據提示按回車
ssh-keygen -t rsa

#進入jenkins容器-檢視jenkins 祕鑰
cat /var/jenkins_home/.ssh/id_rsa.pub

#k8s-mast伺服器中authorized_keys 加入Jenkins伺服器祕鑰
echo "xxxxxx" >> /root/.ssh/authorized_keys

十四、開發環境部署IDEA

工具下載:

連結:https://pan.baidu.com/s/1Jkyh_kgrT2o388Xiujbdeg?pwd=b7rx
提取碼:b7rx

1. windows配置maven 和jdk

https://blog.csdn.net/weixin_46565024/article/details/122758111

2. IDEA簡單得專案建立

File>New>Project

Spring Initializr>Next

Type(選擇Maven)>Java Version (選擇8) > Next

Web> 勾選√Spring Web> Next>Finish

原文作者:「大蝦別跑」
原文連結:https://blog.csdn.net/qq_35583325/article/details/128172276

關注公眾號【OSC DevOps】閱讀更多精彩文章。

掃碼新增【開源中國源創君】微信,稍後邀請您進入DevOps學習交流群。