Deploying an Elasticsearch 7.9.1 Cluster on k8s

December 6, 2022

Deployment Diagram

Diagram: ES service cluster built on k8s

In the diagram, service1, service2, and service3 correspond to the inner Services (es-svc-inner-01/02/03 in the manifests) used for node-to-node communication within the cluster, while service corresponds to elasticsearch-svc, which exposes the cluster to external clients.
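The seed-host values used in the manifests below are bare Service names; because the Pods and Services all share the ns-elasticsearch namespace, cluster DNS expands a short name like es-svc-inner-02 to es-svc-inner-02.ns-elasticsearch.svc.cluster.local. Once the Services are applied, a throwaway pod can sanity-check resolution (the busybox image here is an arbitrary choice):

kubectl run dns-test --rm -it --restart=Never --namespace ns-elasticsearch \
  --image=busybox:1.36 -- nslookup es-svc-inner-01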

Using Custom Storage Volumes

Create three NFS PVCs

Create three PVCs through the management console: es-nfs-pvc-1, es-nfs-pvc-2, and es-nfs-pvc-3.
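If you prefer kubectl to the console, a minimal static PV/PVC pair per volume looks roughly like this; the NFS server address, export path, and size are placeholders, and the same pattern repeats for es-nfs-pvc-2 and es-nfs-pvc-3:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-nfs-pv-1
spec:
  capacity:
    storage: 10Gi            # placeholder size
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.1.100    # placeholder: your NFS server
    path: /data/es-1         # placeholder: your export path
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: es-nfs-pvc-1
  namespace: ns-elasticsearch
spec:
  storageClassName: ""       # skip dynamic provisioning
  volumeName: es-nfs-pv-1    # pin the claim to the PV above
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi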

Create the Pods

The StatefulSet deployment file, es-statefulset.yaml:

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-sts-0
  namespace: ns-elasticsearch
spec:
  selector:
    matchLabels:
      app: es
      es-cluster: es01
  serviceName: "elasticsearch-svc"
  replicas: 1
  template:
    metadata:
      labels:
        app: es
        es-cluster: es01
    spec:
      terminationGracePeriodSeconds: 10
      volumes:
        - name: data-storage
          persistentVolumeClaim:
            claimName: es-nfs-pvc-1 # use the first storage volume
      containers:
      - name: elasticsearch
        image: registry.hjy.com/hjytest/elasticsearch:7.9.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9200
          name: es-cli
        - containerPort: 9300
          name: es-inner
        env:
        - name: cluster.name
          value: es-cluster
        - name: node.name
          value: es01
        - name: cluster.initial_master_nodes # bootstraps cluster formation on first startup only; can be dropped once the cluster has formed
          value: es01,es02,es03 # must match each node's node.name
        - name: discovery.seed_hosts
          value: es-svc-inner-02,es-svc-inner-03
        - name: http.cors.enabled
          value: "true"
        - name: http.cors.allow-origin
          value: "*"
        volumeMounts:
          - mountPath: "/usr/share/elasticsearch/data"
            name: data-storage

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-sts-02
  namespace: ns-elasticsearch
spec:
  selector:
    matchLabels:
      app: es
      es-cluster: es02
  serviceName: "elasticsearch-svc"
  replicas: 1
  template:
    metadata:
      labels:
        app: es
        es-cluster: es02
    spec:
      terminationGracePeriodSeconds: 10
      volumes:
        - name: data-storage
          persistentVolumeClaim:
            claimName: es-nfs-pvc-2 # use the second storage volume
      containers:
      - name: es-02
        image: registry.hjy.com/hjytest/elasticsearch:7.9.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9200
          name: es-cli
        - containerPort: 9300
          name: es-inner
        env:
        - name: cluster.name
          value: es-cluster
        - name: node.name
          value: es02
        - name: cluster.initial_master_nodes # best to set this list on every node, and it must match es01's
          value: es01,es02,es03
        - name: discovery.seed_hosts
          value: es-svc-inner-01,es-svc-inner-03
        - name: http.cors.enabled
          value: "true"
        - name: http.cors.allow-origin
          value: "*"
        volumeMounts:
          - mountPath: "/usr/share/elasticsearch/data"
            name: data-storage
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-sts-03
  namespace: ns-elasticsearch
spec:
  selector:
    matchLabels:
      app: es
      es-cluster: es03
  serviceName: "elasticsearch-svc"
  replicas: 1
  template:
    metadata:
      labels:
        app: es
        es-cluster: es03
    spec:
      terminationGracePeriodSeconds: 10
      volumes:
        - name: data-storage
          persistentVolumeClaim:
            claimName: es-nfs-pvc-3 # use the third storage volume
      containers:
      - name: es-03
        image: registry.hjy.com/hjytest/elasticsearch:7.9.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9200
          name: es-cli
        - containerPort: 9300
          name: es-inner
        env:
        - name: cluster.name
          value: es-cluster
        - name: node.name
          value: es03
        - name: cluster.initial_master_nodes # best to set this list on every node, and it must match es01's
          value: es01,es02,es03
        - name: discovery.seed_hosts
          value: es-svc-inner-01,es-svc-inner-02
        - name: http.cors.enabled
          value: "true"
        - name: http.cors.allow-origin
          value: "*"
        volumeMounts:
          - mountPath: "/usr/share/elasticsearch/data"
            name: data-storage

Note: if the storage volumes fail to deploy, two things to check: the NFS service must be running on the corresponding server, and the exported paths must have read/write permissions.
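A few checks on the NFS server side usually pinpoint the problem; the export path here is a placeholder, and note the official Elasticsearch image runs as uid 1000 by default:

# confirm the NFS service is running (CentOS/RHEL service name shown)
systemctl status nfs-server

# confirm the path is exported, e.g. /data/es-1 192.168.1.0/24(rw,sync,no_root_squash)
cat /etc/exports
exportfs -v

# give the in-container elasticsearch user write access to the export
chown -R 1000:1000 /data/es-1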

Deploying the Services: es-service.yaml

## Service that exposes the cluster externally
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-svc
  namespace: ns-elasticsearch
  labels:
    app: elasticsearch-svc
spec:
  type: NodePort
  ports:
  - port: 9200
    name: es-cli
    nodePort: 32001
  selector:
    app: es

---
## Services for node-to-node communication inside the cluster
apiVersion: v1
kind: Service
metadata:
  name: es-svc-inner-01
  namespace: ns-elasticsearch
  labels:
    app: es-svc-inner-01
spec:
  type: ClusterIP
  ports:
  - port: 9300
    name: es-port
  selector:
    es-cluster: es01

---
apiVersion: v1
kind: Service
metadata:
  name: es-svc-inner-02
  namespace: ns-elasticsearch
  labels:
    app: es-svc-inner-02
spec:
  type: ClusterIP
  ports:
  - port: 9300
    name: es-port
  selector:
    es-cluster: es02

---
apiVersion: v1
kind: Service
metadata:
  name: es-svc-inner-03
  namespace: ns-elasticsearch
  labels:
    app: es-svc-inner-03
spec:
  type: ClusterIP
  ports:
  - port: 9300
    name: es-port
  selector:
    es-cluster: es03

Run the Deployment

[root@k8s-master1 k8syaml]# kubectl apply -f es-statefulset.yaml
statefulset.apps/es-sts-0 created
statefulset.apps/es-sts-02 created
statefulset.apps/es-sts-03 created
[root@k8s-master1 k8syaml]# kubectl apply -f es-service.yaml
service/elasticsearch-svc created
service/es-svc-inner-01 created
service/es-svc-inner-02 created
service/es-svc-inner-03 created

[root@k8s-master1 k8syaml]# kubectl get pods --namespace ns-elasticsearch -o wide
NAME          READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES
es-sts-0-0    1/1     Running   0          13s   10.244.6.32   k8s-node4   <none>           <none>
es-sts-02-0   1/1     Running   0          22m   10.244.4.50   k8s-node2   <none>           <none>
es-sts-03-0   1/1     Running   0          22m   10.244.3.28   k8s-node1   <none>           <none>

[root@k8s-master1 k8syaml]# kubectl get svc --namespace ns-elasticsearch -o wide
NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE   SELECTOR
es-svc-inner-01   ClusterIP   10.96.50.14     <none>        9300/TCP   21m   es-cluster=es01
es-svc-inner-02   ClusterIP   10.96.185.72    <none>        9300/TCP   21m   es-cluster=es02
es-svc-inner-03   ClusterIP   10.96.163.153   <none>        9300/TCP   21m   es-cluster=es03
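At this point you can confirm that the three nodes have actually formed a single cluster through the NodePort Service; <node-ip> is a placeholder for any node's address:

curl "http://<node-ip>:32001/_cluster/health?pretty"

A healthy three-node cluster reports "number_of_nodes" : 3.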

Viewing the deployment in the LKE console

Setting the Elasticsearch Account and Password

Set the passwords

Run the interactive setup and enter a password for each built-in user when prompted (for easy recall, I used the same password for all of them):

./elasticsearch-7.9.1/bin/elasticsearch-setup-passwords interactive
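After the passwords are set, the health check from earlier needs credentials; <your-password> and <node-ip> are placeholders. Note this step assumes security is enabled (xpack.security.enabled set to "true" in the ES config or as an extra env entry, which the manifests above do not include):

curl -u elastic:<your-password> "http://<node-ip>:32001/_cluster/health?pretty"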

In my testing, three ES instances are needed to keep the cluster running after one of them fails; the official documentation likewise recommends at least three master-eligible nodes to guard against data loss.

P.S. By default a standalone Elasticsearch node will auto-bootstrap itself into a cluster, but as soon as any one of discovery.seed_providers, discovery.seed_hosts, or cluster.initial_master_nodes is configured, Elasticsearch no longer auto-bootstraps.
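For contrast, a throwaway single-node instance needs none of those three settings, or can declare its intent explicitly with the following env entry (same fragment style as the container env lists in the manifests above):

- name: discovery.type
  value: single-node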

Problem encountered: one node's pod failed to start with the following error:

max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]

Solution:

Edit /etc/sysctl.conf on the node (sudo vi /etc/sysctl.conf) and add the following line:

vm.max_map_count = 262144

Then apply it:

sysctl -p

After that, restart Elasticsearch and it starts successfully.
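Editing sysctl.conf by hand has to be repeated on every node that may schedule an ES pod. A common alternative is to let each StatefulSet set the kernel parameter itself via a privileged initContainer; a sketch, added under the Pod template spec alongside containers (the image and container name are arbitrary choices):

initContainers:
- name: sysctl-init
  image: busybox:1.36
  command: ["sysctl", "-w", "vm.max_map_count=262144"]
  securityContext:
    privileged: true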
