[Cloud-Native in Action] Installing KubeSphere on Kubernetes
Installing the KubeSphere platform
Installing KubeSphere on Kubernetes
- The Kubernetes versions that KubeKey can install are not the same as the Kubernetes versions supported by KubeSphere 3.3. To install KubeSphere 3.3 on an existing Kubernetes cluster, your Kubernetes version must be v1.20.x, v1.21.x, * v1.22.x, * v1.23.x, or * v1.24.x (you can verify your cluster version with the commands below).
- For versions marked with an asterisk, some edge-node features may be unavailable. If you need KubeEdge, it is recommended to install Kubernetes v1.21.x to avoid compatibility problems.
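A quick way to confirm the version of the existing cluster before installing (run on any host where kubectl is configured against the cluster):
# Print the client/server versions and list the nodes with their kubelet versions
kubectl version --short
kubectl get nodes -o wide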
Prerequisites for installing KubeSphere
NFS file system
Install nfs-server
# Run on every machine
yum install -y nfs-utils
# Run the following command on the master node only
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# Create the shared directory, then start the NFS service
mkdir -p /nfs/data
# Run on the master node
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
# Reload the export configuration
exportfs -r
# Check that the configuration took effect
exportfs
Configure nfs-client
# Replace 172.31.0.4 with the address of your NFS server
showmount -e 172.31.0.4
mkdir -p /nfs/data
mount -t nfs 172.31.0.4:/nfs/data /nfs/data
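To sanity-check the mount, write a file on the client and confirm it shows up on the server (a minimal check, assuming the paths above):
# On the NFS client
echo "nfs ok" > /nfs/data/test.txt
# On the NFS server
cat /nfs/data/test.txt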
Configure the default StorageClass
## Create a StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true" ## whether to archive (back up) a PV's contents when the PV is deleted
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: yuufnn/nfs-external-provisioner:v4.0.0
          # resources:
          #   limits:
          #     cpu: 10m
          #   requests:
          #     cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.0.110 ## set this to your own NFS server address
            - name: NFS_PATH
              value: /nfs/data ## the directory shared by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.110
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
Test: request storage through a PVC
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: nfs-storage
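Assuming the manifest above is saved as pvc.yaml, apply it and check that the provisioner creates a matching PV automatically:
kubectl apply -f pvc.yaml
# The PVC should become Bound and a dynamically provisioned PV should appear
kubectl get pvc nginx-pvc
kubectl get pv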
Download the KubeSphere core files
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml
Modify cluster-configuration
Specify the features you want to enable in cluster-configuration.yaml.
Refer to "Enable Pluggable Components" in the official documentation:
https://kubesphere.com.cn/docs/pluggable-components/overview/
The modified cluster-configuration.yaml:
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.3.2
spec:
  persistence:
    storageClass: ""        # If there is no default StorageClass in your cluster, you need to specify an existing StorageClass here.
  authentication:
    # adminPassword: ""     # Custom password of the admin user. If the parameter exists but the value is empty, a random password is generated. If the parameter does not exist, P@88w0rd is used.
    jwtSecret: ""           # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster.
  local_registry: ""        # Add your private registry address if it is needed.
  # dev_tag: ""             # Add your kubesphere image tag you want to install, by default it's same as ks-installer release version.
  etcd:
    monitoring: false       # Enable or disable etcd monitoring dashboard installation. You have to create a Secret for etcd before you enable it.
    endpointIps: localhost  # etcd cluster EndpointIps. It can be a bunch of IPs here.
    port: 2379              # etcd port.
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true  # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time.
        port: 30880
        type: NodePort
    # apiserver:            # Enlarge the apiserver and controller manager's resource requests and limits for the large cluster
    #   resources: {}
    # controllerManager:
    #   resources: {}
    redis:
      enabled: false
      enableHA: false
      volumeSize: 2Gi       # Redis PVC size.
    openldap:
      enabled: false
      volumeSize: 2Gi       # openldap PVC size.
    minio:
      volumeSize: 20Gi      # Minio PVC size.
    monitoring:
      # type: external      # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line.
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 # Prometheus endpoint to get metrics data.
      GPUMonitoring:        # Enable or disable the GPU-related metrics. If you enable this switch but have no GPU resources, Kubesphere will set it to zero.
        enabled: false
    gpu:                    # Install GPUKinds. The default GPU kind is nvidia.com/gpu. Other GPU kinds can be added here according to your needs.
      kinds:
        - resourceName: "nvidia.com/gpu"
          resourceType: "GPU"
          default: true
    es:                     # Storage backend for logging, events and auditing.
      # master:
      #   volumeSize: 4Gi   # The volume size of Elasticsearch master nodes.
      #   replicas: 1       # The total number of master nodes. Even numbers are not allowed.
      #   resources: {}
      # data:
      #   volumeSize: 20Gi  # The volume size of Elasticsearch data nodes.
      #   replicas: 1       # The total number of data nodes.
      #   resources: {}
      logMaxAge: 7          # Log retention time in built-in Elasticsearch. It is 7 days by default.
      elkPrefix: logstash   # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:                 # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
    enabled: true           # Enable or disable the KubeSphere Alerting System.
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:                 # Provide a security-relevant chronological set of records, recording the sequence of activities happening on the platform, initiated by different tenants.
    enabled: true           # Enable or disable the KubeSphere Auditing Log System.
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:                   # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
    enabled: true           # Enable or disable the KubeSphere DevOps System.
    # resources: {}
    jenkinsMemoryLim: 4Gi   # Jenkins memory limit.
    jenkinsMemoryReq: 2Gi   # Jenkins memory request.
    jenkinsVolumeSize: 8Gi  # Jenkins volume size.
  events:                   # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
    enabled: false          # Enable or disable the KubeSphere Events System.
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    # ruler:
    #   enabled: true
    #   replicas: 2
    #   resources: {}
  logging:                  # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
    enabled: true           # Enable or disable the KubeSphere Logging System.
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:           # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler).
    enabled: true           # Enable or disable metrics-server.
  monitoring:
    storageClass: ""        # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default.
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1         # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability.
    #   volumeSize: 20Gi    # Prometheus PVC size.
    #   resources: {}
    #   operator:
    #     resources: {}
    # alertmanager:
    #   replicas: 1         # AlertManager Replicas.
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:                    # GPU monitoring-related plug-in installation.
      nvidia_dcgm_exporter: # Ensure that gpu resources on your hosts can be used normally, otherwise this plug-in will not work properly.
        enabled: false      # Check whether the labels on the GPU hosts contain "nvidia.com/gpu.present=true" to ensure that the DCGM pod is scheduled to these nodes.
        # resources: {}
  multicluster:
    clusterRole: none       # host | member | none  # You can install a solo cluster, or specify it as the Host or Member Cluster.
  network:
    networkpolicy:          # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
      # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
      enabled: true         # Enable or disable network policies.
    ippool:                 # Use Pod IP Pools to manage the Pod network address space. Pods to be created can be assigned IP addresses from a Pod IP Pool.
      type: calico          # Specify "calico" for this field if Calico is used as your CNI plugin. "none" means that Pod IP Pools are disabled.
    topology:               # Use Service Topology to view Service-to-Service communication based on Weave Scope.
      type: none            # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled.
  openpitrix:               # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle.
    store:
      enabled: true         # Enable or disable the KubeSphere App Store.
  servicemesh:              # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology.
    enabled: true           # Base component (pilot). Enable or disable KubeSphere Service Mesh (Istio-based).
    istio:                  # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/
      components:
        ingressGateways:
          - name: istio-ingressgateway
            enabled: false
        cni:
          enabled: false
  edgeruntime:              # Add edge nodes to your cluster and deploy workloads on edge nodes.
    enabled: false
    kubeedge:               # kubeedge configurations
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided.
            - ""            # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided.
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  gatekeeper:               # Provide admission policy and rule management, A validating (mutating TBA) webhook that enforces CRD-based policies executed by Open Policy Agent.
    enabled: false          # Enable or disable Gatekeeper.
    # controller_manager:
    #   resources: {}
    # audit:
    #   resources: {}
  terminal:
    # image: 'alpine:3.15'  # There must be an nsenter program in the image
    timeout: 600            # Container timeout, if set to 0, no timeout will be used. The unit is seconds
Run the installation
All of the following steps are executed on the master node.
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml
Check the installation progress
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
# Check the console service port
kubectl get svc/ks-console -n kubesphere-system
Access port 30880 on any node.
Account: admin
Password: P@88w0rd
Middleware deployment
Notes:
1. When creating a stateful workload, do not create the storage volume in advance; create it as part of creating the workload.
2. The difference between creating the volume first and creating it while creating the workload:
If the volume is created first and the workload afterwards, and the workload has multiple containers (replicas), those containers all share the same volume.
If the volume is created while creating the workload, each container gets its own volume when it starts.
1. MySQL deployment
1) Start the MySQL container (reference Docker command)
docker run -p 3306:3306 --name mysql-01 \
-v /mydata/mysql/log:/var/log/mysql \
-v /mydata/mysql/data:/var/lib/mysql \
-v /mydata/mysql/conf:/etc/mysql/conf.d \
-e MYSQL_ROOT_PASSWORD=root \
--restart=always \
-d mysql:5.7
2) MySQL configuration example
[client]
default-character-set=utf8mb4
[mysql]
default-character-set=utf8mb4
[mysqld]
init_connect='SET collation_connection = utf8mb4_unicode_ci'
init_connect='SET NAMES utf8mb4'
character-set-server=utf8mb4
collation-server=utf8mb4_unicode_ci
skip-character-set-client-handshake
#skip-name-resolve
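When MySQL is deployed on the cluster, this configuration is usually provided through a ConfigMap (a configuration dictionary in KubeSphere) mounted at /etc/mysql/conf.d instead of a host directory. A minimal sketch, assuming the file above is saved as my.cnf and the project/namespace is his:
# Create a ConfigMap from the configuration file shown above (names are illustrative)
kubectl create configmap mysql-conf --from-file=my.cnf -n his
# When creating the MySQL workload, mount the mysql-conf ConfigMap at /etc/mysql/conf.d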
3) MySQL deployment analysis
1. Inside the cluster, applications connect directly via [service-name.project-name]:
mysql -uroot -h his-mysql-glgf.his -p
2. Outside the cluster, expose the service (for example as a NodePort) and connect through a node IP and the assigned port, as sketched below.
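A minimal sketch of external access (the service name and project his follow the example above; use whatever service you exposed for MySQL):
# Find the NodePort assigned to the MySQL service in the his project
kubectl get svc -n his
# Connect from outside the cluster through any node IP and that port
mysql -uroot -h <node-ip> -P <node-port> -p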
2. Deploying Elasticsearch
1) Start the Elasticsearch container (reference Docker command)
# Create the data directory
mkdir -p /mydata/es-01 && chmod 777 -R /mydata/es-01
# Start the container
docker run --restart=always -d -p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
-v es-config:/usr/share/elasticsearch/config \
-v /mydata/es-01/data:/usr/share/elasticsearch/data \
--name es-01 \
elasticsearch:7.13.4
2) Elasticsearch deployment analysis
Note: with subPath mounts, Kubernetes does not hot-reload the corresponding configuration files inside the Pod after the configuration is changed; you have to restart the Pod yourself.
3) SubPath mounts
The container has several files under /usr/share/elasticsearch/config. If you mount the volume without a subPath, the mount replaces the whole directory and only the two mounted files (shown in the figure in step 2) remain; with a subPath mount, the other files in the target directory are preserved and only the mounted files are overridden. A minimal subPath example is sketched below.
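A sketch of a subPath mount that overrides only elasticsearch.yml while keeping the rest of the config directory (the Pod name, namespace and ConfigMap name es-conf are illustrative; the ConfigMap is assumed to contain an elasticsearch.yml key):
apiVersion: v1
kind: Pod
metadata:
  name: es-subpath-demo
  namespace: his
spec:
  containers:
    - name: es
      image: elasticsearch:7.13.4
      env:
        - name: discovery.type
          value: single-node
      volumeMounts:
        - name: es-conf
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml   # override a single file; the rest of the directory is kept
  volumes:
    - name: es-conf
      configMap:
        name: es-conf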
Service-to-service access inside the cluster
pod-name.service-name.namespace(project-name).svc.cluster.local:8848
Example: his-nacos-v1-0.his-nacos.his.svc.cluster.local:8848
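To verify that such an address resolves inside the cluster, you can run a throwaway Pod and query it (a quick check using the example name above):
kubectl run dns-check -n his --rm -it --restart=Never --image=busybox:1.28 -- nslookup his-nacos-v1-0.his-nacos.his.svc.cluster.local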
Startup-order problems (health checks)
https://v2-1.docs.kubesphere.io/docs/zh-CN/workload/health-check/
Kubernetes study notes (42): Pod health checks in detail (CSDN blog)
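A common way to handle startup order is a readiness probe on each service, so it only receives traffic once it reports healthy. A minimal sketch of the fields to add under the container in a Deployment (the /actuator/health path and port 8080 assume a Spring Boot service with Actuator enabled; the commented-out probe in the deploy manifest further below uses the same idea):
readinessProbe:
  httpGet:
    path: /actuator/health
    port: 8080
  initialDelaySeconds: 10
  periodSeconds: 5
  timeoutSeconds: 10
  failureThreshold: 30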
Related configuration
Dockerfile
#FROM openjdk:8-jdk
FROM registry.cn-chengdu.aliyuncs.com/wkldocker/jdk:8
LABEL maintainer=wkl
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo 'Asia/Shanghai' >/etc/timezone
COPY target/*.jar /app.jar
EXPOSE 9200
# hospital-manage
ENTRYPOINT ["/bin/sh","-c","java -Dfile.encoding=utf8 -Djava.security.egd=file:/dev/./urandom -jar /app.jar "]
Jenkinsfile
pipeline {
agent {
node {
label 'maven'
}
}
stages {
stage('Checkout code') {
agent none
steps {
container('maven') {
git(url: 'https://gitee.com/leifengyang/yygh-parent.git', credentialsId: 'gitee-id', branch: 'master', changelog: true, poll: false)
sh 'ls -al'
}
}
}
stage('Build project') {
agent none
steps {
container('maven') {
sh 'ls'
sh 'mvn clean package -Dmaven.test.skip=true'
sh 'ls hospital-manage/target'
}
}
}
stage('default-2') {
parallel {
stage('Build hospital-manage image') {
agent none
steps {
container('maven') {
sh 'ls hospital-manage/target'
sh 'docker build -t hospital-manage:latest -f hospital-manage/Dockerfile ./hospital-manage/'
}
}
}
stage('Build server-gateway image') {
agent none
steps {
container('maven') {
sh 'ls server-gateway/target'
sh 'docker build -t server-gateway:latest -f server-gateway/Dockerfile ./server-gateway/'
}
}
}
stage('Build service-cmn image') {
agent none
steps {
container('maven') {
sh 'ls service/service-cmn/target'
sh 'docker build -t service-cmn:latest -f service/service-cmn/Dockerfile ./service/service-cmn/'
}
}
}
stage('Build service-hosp image') {
agent none
steps {
container('maven') {
sh 'ls service/service-hosp/target'
sh 'docker build -t service-hosp:latest -f service/service-hosp/Dockerfile ./service/service-hosp/'
}
}
}
stage('Build service-order image') {
agent none
steps {
container('maven') {
sh 'ls service/service-order/target'
sh 'docker build -t service-order:latest -f service/service-order/Dockerfile ./service/service-order/'
}
}
}
stage('Build service-oss image') {
agent none
steps {
container('maven') {
sh 'ls service/service-oss/target'
sh 'docker build -t service-oss:latest -f service/service-oss/Dockerfile ./service/service-oss/'
}
}
}
stage('Build service-sms image') {
agent none
steps {
container('maven') {
sh 'ls service/service-sms/target'
sh 'docker build -t service-sms:latest -f service/service-sms/Dockerfile ./service/service-sms/'
}
}
}
stage('Build service-statistics image') {
agent none
steps {
container('maven') {
sh 'ls service/service-statistics/target'
sh 'docker build -t service-statistics:latest -f service/service-statistics/Dockerfile ./service/service-statistics/'
}
}
}
stage('Build service-task image') {
agent none
steps {
container('maven') {
sh 'ls service/service-task/target'
sh 'docker build -t service-task:latest -f service/service-task/Dockerfile ./service/service-task/'
}
}
}
stage('Build service-user image') {
agent none
steps {
container('maven') {
sh 'ls service/service-user/target'
sh 'docker build -t service-user:latest -f service/service-user/Dockerfile ./service/service-user/'
}
}
}
}
}
stage('default-3') {
parallel {
stage('Push hospital-manage image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag hospital-manage:latest $REGISTRY/$DOCKERHUB_NAMESPACE/hospital-manage:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/hospital-manage:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push server-gateway image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag server-gateway:latest $REGISTRY/$DOCKERHUB_NAMESPACE/server-gateway:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/server-gateway:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-cmn image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-cmn:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-cmn:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-cmn:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-hosp image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-hosp:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-hosp:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-hosp:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-order image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-order:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-order:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-order:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-oss image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-oss:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-oss:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-oss:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-sms image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-sms:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-sms:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-sms:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-statistics image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-statistics:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-statistics:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-statistics:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-task image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-task:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-task:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-task:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
stage('Push service-user image') {
agent none
steps {
container('maven') {
withCredentials([usernamePassword(credentialsId : 'aliyun-docker-registry' ,usernameVariable : 'DOCKER_USER_VAR' ,passwordVariable : 'DOCKER_PWD_VAR' ,)]) {
sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
sh 'docker tag service-user:latest $REGISTRY/$DOCKERHUB_NAMESPACE/service-user:SNAPSHOT-$BUILD_NUMBER'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/service-user:SNAPSHOT-$BUILD_NUMBER'
}
}
}
}
}
}
stage('default-4') {
parallel {
stage('hospital-manage - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'hospital-manage/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('server-gateway - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'server-gateway/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-cmn - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-cmn/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-hosp - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-hosp/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-order - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-order/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-oss - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-oss/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-sms - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-sms/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-statistics - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-statistics/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-task - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-task/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
stage('service-user - deploy to dev') {
agent none
steps {
kubernetesDeploy(configs: 'service/service-user/deploy/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
}
}
//1. Configure system-wide email: platform-wide monitoring notifications
//2. Modify the ks-jenkins configuration (its email settings) so the pipeline can send email
stage('Send confirmation email') {
agent none
steps {
mail(to: '17512080612@163.com', subject: 'Build result', body: "Build succeeded: $BUILD_NUMBER")
}
}
}
environment {
DOCKER_CREDENTIAL_ID = 'dockerhub-id'
GITHUB_CREDENTIAL_ID = 'github-id'
KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
REGISTRY = 'registry.cn-hangzhou.aliyuncs.com'
DOCKERHUB_NAMESPACE = 'lfy_hello'
GITHUB_ACCOUNT = 'kubesphere'
APP_NAME = 'devops-java-sample'
ALIYUNHUB_NAMESPACE = 'lfy_hello'
}
parameters {
string(name: 'TAG_NAME', defaultValue: '', description: '')
}
}
Example deploy manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ruoyi-auth
  name: ruoyi-auth
  namespace: ruoyi   # the namespace must be specified
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  selector:
    matchLabels:
      app: ruoyi-auth
  strategy:
    rollingUpdate:
      maxSurge: 50%
      maxUnavailable: 50%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: ruoyi-auth
    spec:
      volumes:
        - name: host-time
          hostPath:
            path: /etc/localtime
            type: ''
      imagePullSecrets:
        - name: aliyun-docker   # create this image-pull secret (Aliyun registry account/password) in the project beforehand
      containers:
        - image: registry.cn-chengdu.aliyuncs.com/wkl_ruoyi/ruoyi-auth:latest
          # readinessProbe:
          #   httpGet:
          #     path: /actuator/health
          #     port: 8080
          #   timeoutSeconds: 10
          #   failureThreshold: 30
          #   periodSeconds: 5
          imagePullPolicy: IfNotPresent   # or Always
          name: app
          volumeMounts:
            - name: host-time
              readOnly: true
              mountPath: /etc/localtime
          ports:
            - containerPort: 9200
              protocol: TCP
          env:
            - name: NACOS_ADDR
              value: nacos-dns.ruoyi
          resources:
            limits:
              cpu: 300m
              memory: 600Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ruoyi-auth
  name: ruoyi-auth
  namespace: ruoyi
spec:
  ports:
    - name: http
      port: 9200
      protocol: TCP
      targetPort: 9200
  selector:
    app: ruoyi-auth
  sessionAffinity: None
  type: ClusterIP
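Assuming the manifest above is saved as deploy.yaml, it can also be applied and verified manually outside the pipeline:
kubectl apply -f deploy.yaml
# Confirm the Deployment, Service and Pod come up in the ruoyi namespace
kubectl get deploy,svc -n ruoyi
kubectl get pods -n ruoyi -l app=ruoyi-auth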