chore(*): update election example (kubernetes#82821)
* feat(*): simplify leader election example

* chore(*): fix with comment

* chore(*): add os.exit

* chore(*): fix typo
Mengjiang Bao authored and k8s-ci-robot committed Oct 11, 2019
1 parent f985367 commit 1185012
Showing 5 changed files with 42 additions and 45 deletions.
2 changes: 1 addition & 1 deletion staging/src/k8s.io/client-go/examples/leader-election/BUILD
@@ -23,7 +23,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
"//staging/src/k8s.io/client-go/transport:go_default_library",
"//vendor/github.com/google/uuid:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
11 changes: 6 additions & 5 deletions staging/src/k8s.io/client-go/examples/leader-election/README.md
@@ -7,15 +7,16 @@ This example demonstrates how to use the leader election package.
Run the following three commands in separate terminals. Each terminal needs a unique `id`.

```bash
# first terminal
go run *.go -kubeconfig=/my/config -logtostderr=true -id=1
# first terminal
go run main.go -kubeconfig=/path/to/kubeconfig -logtostderr=true -lease-lock-name=example -lease-lock-namespace=default -id=1

# second terminal
go run *.go -kubeconfig=/my/config -logtostderr=true -id=2
# second terminal
go run main.go -kubeconfig=/path/to/kubeconfig -logtostderr=true -lease-lock-name=example -lease-lock-namespace=default -id=2

# third terminal
go run *.go -kubeconfig=/my/config -logtostderr=true -id=3
go run main.go -kubeconfig=/path/to/kubeconfig -logtostderr=true -lease-lock-name=example -lease-lock-namespace=default -id=3
```

> You can ignore the `-kubeconfig` flag if you are running these commands in the Kubernetes cluster.
Now kill the existing leader. You will see from the terminal outputs that one of the remaining two processes will be elected as the new leader.
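
To confirm which process holds the lock at any point, you can read the Lease object the three processes compete for. The following is a minimal sketch (not part of this commit), assuming the `example` lease in the `default` namespace from the commands above and a placeholder kubeconfig path; the `Get` call uses the same non-context signature as the client-go version in this example:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder path: point this at the same kubeconfig used above.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := clientset.NewForConfigOrDie(config)

	// Read the Lease the three terminals are competing for and print its holder.
	lease, err := client.CoordinationV1().Leases("default").Get("example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if lease.Spec.HolderIdentity != nil {
		fmt.Printf("current holder: %s\n", *lease.Spec.HolderIdentity)
	}
}
```

Running `kubectl get lease example -n default -o yaml` should show the same `holderIdentity` field switching to one of the surviving ids after the leader is killed.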
72 changes: 33 additions & 39 deletions staging/src/k8s.io/client-go/examples/leader-election/main.go
@@ -19,21 +19,18 @@ package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"strings"
"syscall"
"time"

"github.com/google/uuid"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/transport"
"k8s.io/klog"
)

@@ -62,13 +59,16 @@ func main() {
var id string

flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
flag.StringVar(&id, "id", "", "the holder identity name")
flag.StringVar(&leaseLockName, "lease-lock-name", "example", "the lease lock resource name")
flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "default", "the lease lock resource namespace")
flag.StringVar(&id, "id", uuid.New().String(), "the holder identity name")
flag.StringVar(&leaseLockName, "lease-lock-name", "", "the lease lock resource name")
flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", "", "the lease lock resource namespace")
flag.Parse()

if id == "" {
klog.Fatal("unable to get id (missing id flag).")
if leaseLockName == "" {
klog.Fatal("unable to get lease lock resource name (missing lease-lock-name flag).")
}
if leaseLockNamespace == "" {
klog.Fatal("unable to get lease lock resource namespace (missing lease-lock-namespace flag).")
}

// leader election uses the Kubernetes API by writing to a
@@ -82,38 +82,42 @@ func main() {
}
client := clientset.NewForConfigOrDie(config)

// we use the Lease lock type since edits to Leases are less common
// and fewer objects in the cluster watch "all Leases".
lock := &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: leaseLockName,
Namespace: leaseLockNamespace,
},
Client: client.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
},
run := func(ctx context.Context) {
// complete your controller loop here
klog.Info("Controller loop...")

select {}
}

// use a Go context so we can tell the leaderelection code when we
// want to step down
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// use a client that will stop allowing new requests once the context ends
config.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))

// listen for interrupts or the Linux SIGTERM signal and cancel
// our context, which the leader election code will observe and
// step down
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
go func() {
<-ch
log.Printf("Received termination, signaling shutdown")
klog.Info("Received termination, signaling shutdown")
cancel()
}()

// we use the Lease lock type since edits to Leases are less common
// and fewer objects in the cluster watch "all Leases".
lock := &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: leaseLockName,
Namespace: leaseLockNamespace,
},
Client: client.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
},
}

// start the leader election code loop
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: lock,
@@ -131,31 +135,21 @@ func main() {
OnStartedLeading: func(ctx context.Context) {
// we're notified when we start - this is where you would
// usually put your code
klog.Infof("%s: leading", id)
run(ctx)
},
OnStoppedLeading: func() {
// we can do cleanup here, or after the RunOrDie method
// returns
klog.Infof("%s: lost", id)
// we can do cleanup here
klog.Infof("leader lost: %s", id)
os.Exit(0)
},
OnNewLeader: func(identity string) {
// we're notified when new leader elected
if identity == id {
// I just got the lock
return
}
klog.Infof("new leader elected: %v", identity)
klog.Infof("new leader elected: %s", identity)
},
},
})

// because the context is closed, the client should report errors
_, err = client.CoordinationV1().Leases(leaseLockNamespace).Get(leaseLockName, metav1.GetOptions{})
if err == nil || !strings.Contains(err.Error(), "the leader is shutting down") {
log.Fatalf("%s: expected to get an error when trying to make a client call: %v", id, err)
}

// we no longer hold the lease, so perform any cleanup and then
// exit
log.Printf("%s: done", id)
}
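
The code that builds `config` before `clientset.NewForConfigOrDie(config)` is collapsed in this view. Below is a minimal sketch of what such a helper might look like, assuming the behavior implied by the README note (use the `-kubeconfig` flag when given, otherwise fall back to in-cluster configuration) and relying on the `rest` and `clientcmd` packages already imported in the file; the name `buildConfig` is hypothetical and the example's actual code may differ:

```go
// buildConfig (hypothetical name) prefers an explicit kubeconfig path and
// otherwise assumes the process runs inside a cluster, using the service
// account credentials mounted into the pod.
func buildConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig != "" {
		cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return nil, err
		}
		return cfg, nil
	}

	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return cfg, nil
}
```

This is why the README says `-kubeconfig` can be omitted when the binary runs inside a cluster: `rest.InClusterConfig` reads the API server address and token from the pod environment.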
1 change: 1 addition & 0 deletions staging/src/k8s.io/client-go/go.mod
@@ -14,6 +14,7 @@ require (
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903
github.com/golang/protobuf v1.3.1
github.com/google/gofuzz v1.0.0
github.com/google/uuid v1.1.1
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d
github.com/gophercloud/gophercloud v0.1.0
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7
1 change: 1 addition & 0 deletions staging/src/k8s.io/client-go/go.sum

Some generated files are not rendered by default.