Install and deploy an etcd cluster on CentOS 7

etcd cluster nodes:

Role     OS        Node
master   CentOS-7  192.168.10.5
node-1   CentOS-7  192.168.10.6
node-2   CentOS-7  192.168.10.7

Install etcd with yum

# Configure the Aliyun EPEL repository:

mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup

mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup

wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install etcd

yum list installed |grep -i etcd
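To confirm which version was installed (the exact version depends on the EPEL snapshot), the binaries report it directly:

etcd --version
etcdctl --version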

master etcd configuration

[root@master etcd]# egrep -v "^$|^#" /etc/etcd/etcd.conf

ETCD_DATA_DIR="/var/lib/etcd/default.etcd" # directory where etcd stores its data
ETCD_LISTEN_PEER_URLS="http://192.168.10.5:2380" # URL used for peer (intra-cluster) communication
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" # URL that external clients connect to
ETCD_NAME="etcd01" # name of this etcd member
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.5:2380" # peer URL advertised to the other cluster members
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379" # client URL advertised to external clients
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.10.5:2380,etcd02=http://192.168.10.6:2380,etcd03=http://192.168.10.7:2380" # initial list of cluster members
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" # name (token) of the cluster
ETCD_INITIAL_CLUSTER_STATE="new" # initial cluster state; "new" bootstraps a new cluster

node1 etcd configuration

[root@node1 etcd]# egrep -v "^$|^#" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.10.6:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="etcd02"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.6:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.10.5:2380,etcd02=http://192.168.10.6:2380,etcd03=http://192.168.10.7:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

node2 etcd configuration

[root@node2 ~]# egrep -v "^$|^#" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.10.7:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="etcd03"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.7:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.10.5:2380,etcd02=http://192.168.10.6:2380,etcd03=http://192.168.10.7:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

etcd systemd service unit

[root@localhost etcd]# cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd \
--name=\"${ETCD_NAME}\" \
--data-dir=\"${ETCD_DATA_DIR}\" \
--listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \
--listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \
--initial-advertise-peer-urls=\"${ETCD_INITIAL_ADVERTISE_PEER_URLS}\" \
--advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \
--initial-cluster=\"${ETCD_INITIAL_CLUSTER}\"  \
--initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \
--initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\""
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

All of the steps above must be performed on the master, node1, and node2 nodes, adjusting the member name and IP addresses in /etc/etcd/etcd.conf for each host.
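If firewalld is enabled (the CentOS 7 default; skip this if you have disabled it), the client and peer ports also need to be opened on every node, for example:

firewall-cmd --permanent --add-port=2379/tcp
firewall-cmd --permanent --add-port=2380/tcp
firewall-cmd --reload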

Start etcd

Start etcd on each node (master, node1, node2) in turn; note that the first member started waits for its peers, so systemctl start may appear to hang until a second member is running.

systemctl start etcd.service
systemctl status etcd.service
systemctl enable etcd.service
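If a member fails to start or systemctl status reports errors, the etcd logs can be inspected through journald, for example:

journalctl -u etcd.service -n 50 --no-pager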

Verify the etcd cluster

# List the cluster members
[root@master etcd]# etcdctl member list
315fd62e577c4037: name=etcd03 peerURLs=http://192.168.10.7:2380 clientURLs=http://0.0.0.0:2379 isLeader=false
4c5d727d37966a87: name=etcd01 peerURLs=http://192.168.10.5:2380 clientURLs=http://0.0.0.0:2379 isLeader=true
f617da66fb9b90ad: name=etcd02 peerURLs=http://192.168.10.6:2380 clientURLs=http://0.0.0.0:2379 isLeader=false

# Check the cluster health:

[root@master etcd]# etcdctl cluster-health
member 315fd62e577c4037 is healthy: got healthy result from http://0.0.0.0:2379
member 4c5d727d37966a87 is healthy: got healthy result from http://0.0.0.0:2379
member f617da66fb9b90ad is healthy: got healthy result from http://0.0.0.0:2379
cluster is healthy
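By default etcdctl talks to http://127.0.0.1:2379. To query a specific member from any machine, the endpoint can be passed explicitly; a sketch (depending on the etcdctl build, the flag may be --endpoints or the older --peers, and the IPs should match your own members):

etcdctl --endpoints http://192.168.10.6:2379 member list
etcdctl --endpoints http://192.168.10.7:2379 cluster-health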


Set a key:

Set a value on one node
[root@master etcd]# etcdctl set /test/key "test kubernetes"
test kubernetes
 
Read the value from another node
[root@node1 etcd]# etcdctl get /test/key
test kubernetes

Keys are organized much like in ZooKeeper, as /path/key.
Once a key is set, it can be read from any member of the cluster.
If the directory or key does not exist, the set command creates it.
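Directories can also be created and listed explicitly with the v2 etcdctl commands; a quick sketch (the directory name /test/dir is just an example):

etcdctl mkdir /test/dir
etcdctl ls --recursive /test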

Update a key:

[root@master etcd]# etcdctl update /test/key "test kubernetes cluster"
test kubernetes cluster
 
[root@node1 etcd]# etcdctl get /test/key
test kubernetes cluster
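A key can also be given a time-to-live so that it expires automatically; a minimal sketch (the key name /test/session is just an example):

etcdctl set --ttl 30 /test/session "temporary value"
etcdctl get /test/session   # returns "Key not found" once the 30 seconds have elapsed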

Delete a key:

[root@master etcd]# etcdctl rm /test/key
PrevNode.Value: test kubernetes cluster
 
# Reading a key that no longer exists returns an error
[root@node1 etcd]# etcdctl get /test/key
Error:  100: Key not found (/test/key) [15]
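Directories are removed with the --recursive flag (or --dir for an empty directory); a sketch, assuming the /test directory created above still exists:

etcdctl rm --recursive /test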

Verify through the HTTP API

To check the cluster state, query v2/members on the client port of each of the three nodes; all of them should return the same member list.

curl -L http://127.0.0.1:2379/v2/members
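The same HTTP interface exposes health and statistics endpoints that are handy for monitoring; for example (the leader statistics are typically only served by the current leader):

curl -L http://127.0.0.1:2379/health
curl -L http://127.0.0.1:2379/v2/stats/self
curl -L http://127.0.0.1:2379/v2/stats/leader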

Add and query keys with curl

curl -L http://127.0.0.1:2379/v2/keys/foo -XPUT -d value="Hello foo"
curl -L http://127.0.0.1:2379/v2/keys/foo1/foo1 -XPUT -d value="Hello foo1"
curl -L http://127.0.0.1:2379/v2/keys/foo2/foo2 -XPUT -d value="Hello foo2"
curl -L http://127.0.0.1:2379/v2/keys/foo2/foo21/foo21 -XPUT -d value="Hello foo21"

curl -L http://127.0.0.1:2379/v2/keys/foo
curl -L http://127.0.0.1:2379/v2/keys/foo2
curl -L "http://127.0.0.1:2379/v2/keys/foo2?recursive=true"
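Keys and directories can be deleted through the same API; for example, using the keys created above:

curl -L http://127.0.0.1:2379/v2/keys/foo -XDELETE
curl -L "http://127.0.0.1:2379/v2/keys/foo2?recursive=true" -XDELETE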
