The earlier article Kubernetes中基于二层的负载均衡实现 (implementing Layer 2 based load balancing in Kubernetes) mentioned using leader election to achieve high availability; the corresponding reference code is shown below.
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/golang/glog"
	"github.com/google/uuid"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	defer glog.Flush()

	// Namespace the Pod runs in; reading it from the downward API is recommended.
	namespace := "default"

	// A randomly generated UUID is used as the identity registered with the lock;
	// the Pod name also works as a unique identity.
	id := uuid.New().String()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	config, err := rest.InClusterConfig()
	if err != nil {
		glog.Fatalf("failed to build in-cluster config: %v", err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Translate SIGINT/SIGTERM into closing the stop channel and cancelling the context.
	stop := make(chan struct{})
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sigCh
		glog.Info("Received termination, signaling shutdown")
		close(stop)
		cancel()
	}()

	// Business logic that should only run on the leader.
	run := func(ctx context.Context, stop <-chan struct{}) {
		sharedInformerFactory := informers.NewSharedInformerFactory(client, 5*time.Minute)
		// Business-specific informers/controllers go here.
		glog.Info("start shared informer factory")
		sharedInformerFactory.Start(stop)
		<-stop
	}

	// Create a resource lock backed by a Lease object.
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "app-xxx-lockname",
			Namespace: namespace,
		},
		Client: client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: id,
		},
	}

	// Start the leader election loop; this blocks until the context is
	// cancelled or leadership is lost.
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   60 * time.Second,
		RenewDeadline:   15 * time.Second,
		RetryPeriod:     5 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Business logic to run after being elected leader.
				run(ctx, stop)
			},
			OnStoppedLeading: func() {
				// We can do cleanup here.
				glog.Infof("leader lost: %s", id)
				glog.Flush()
				os.Exit(0)
			},
			OnNewLeader: func(identity string) {
				// Invoked whenever the observed leader changes.
				if identity == id {
					return
				}
				glog.Infof("new leader elected: %s", identity)
			},
		},
	})
}
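The comments in the listing suggest reading the namespace from the downward API and optionally using the Pod name as the election identity. The helpers below are a minimal sketch of one way to do that, placed in a separate file next to the main program. They assume the Pod spec exposes metadata.namespace through a POD_NAMESPACE environment variable via the downward API (the variable name is an assumption, not something from the original code), fall back to the serviceaccount namespace file that Kubernetes mounts into every Pod, and use the hostname, which defaults to the Pod name, as the identity.

package main

import (
	"os"
	"strings"

	"github.com/google/uuid"
)

// getNamespace returns the namespace this Pod runs in. It first checks the
// POD_NAMESPACE env var (assumed to be populated from metadata.namespace via
// the downward API), then falls back to the serviceaccount namespace file
// mounted into every Pod, and finally to "default".
func getNamespace() string {
	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
		return ns
	}
	if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
		if ns := strings.TrimSpace(string(data)); ns != "" {
			return ns
		}
	}
	return "default"
}

// getIdentity returns the identity to register with the resource lock. Inside
// a Pod the hostname defaults to the Pod name, which is already unique within
// the namespace; a random UUID is used as a fallback.
func getIdentity() string {
	if hostname, err := os.Hostname(); err == nil && hostname != "" {
		return hostname
	}
	return uuid.New().String()
}

With these helpers, the hard-coded namespace := "default" and the UUID identity in the listing above can be replaced by getNamespace() and getIdentity().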